diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index eb663978..7bd7b7f6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -57,8 +57,7 @@ build_cloudflared_macos: &build - '[ "${RUNNER_ARCH}" = "intel" ] && export TARGET_ARCH=amd64' - ARCH=$(uname -m) - echo ARCH=$ARCH - TARGET_ARCH=$TARGET_ARCH - - ./.teamcity/mac/install-cloudflare-go.sh - - export PATH="/tmp/go/bin:$PATH" + - ./.teamcity/mac/install-go.sh - BUILD_SCRIPT=.teamcity/mac/build.sh - if [[ ! -x ${BUILD_SCRIPT} ]] ; then exit ; fi - set -euo pipefail diff --git a/.golangci.yaml b/.golangci.yaml index 04900a0e..615dc280 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -27,7 +27,7 @@ linters: - sloglint # Ensure consistent code style when using log/slog. - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed. - staticcheck # It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. - - tenv # Tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17. + - usetesting # Reports uses of functions with replacement inside the testing package. - testableexamples # Linter checks if examples are testable (have an expected output). - testifylint # Checks usage of github.com/stretchr/testify. - tparallel # Tparallel detects inappropriate usage of t.Parallel() method in your Go test codes. diff --git a/.teamcity/mac/install-cloudflare-go.sh b/.teamcity/mac/install-cloudflare-go.sh index bab9fdeb..b688ed43 100755 --- a/.teamcity/mac/install-cloudflare-go.sh +++ b/.teamcity/mac/install-cloudflare-go.sh @@ -7,4 +7,4 @@ rm -rf $GOCACHE export PATH="/tmp/go/bin:$PATH" go version which go -go env \ No newline at end of file +go env diff --git a/.teamcity/mac/install-go.sh b/.teamcity/mac/install-go.sh new file mode 100755 index 00000000..17c084da --- /dev/null +++ b/.teamcity/mac/install-go.sh @@ -0,0 +1,10 @@ +rm -rf /tmp/go +export GOCACHE=/tmp/gocache +rm -rf $GOCACHE + +brew install go@1.24 + +go version +which go +go env + diff --git a/.teamcity/windows/component-test.ps1 b/.teamcity/windows/component-test.ps1 index 548fac6b..f8e716d0 100644 --- a/.teamcity/windows/component-test.ps1 +++ b/.teamcity/windows/component-test.ps1 @@ -32,7 +32,7 @@ Write-Output "Running unit tests" # Not testing with race detector because of https://github.com/golang/go/issues/61058 # We already test it on other platforms -& go test -failfast -mod=vendor ./... +go test -failfast -v -mod=vendor ./... if ($LASTEXITCODE -ne 0) { throw "Failed unit tests" } Write-Output "Running component tests" @@ -44,4 +44,4 @@ if ($LASTEXITCODE -ne 0) { python component-tests/setup.py --type cleanup throw "Failed component tests" } -python component-tests/setup.py --type cleanup \ No newline at end of file +python component-tests/setup.py --type cleanup diff --git a/Dockerfile b/Dockerfile index a90910a0..1f1387d4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # use a builder image for building cloudflare ARG TARGET_GOOS ARG TARGET_GOARCH -FROM golang:1.22.10 as builder +FROM golang:1.24.2 AS builder ENV GO111MODULE=on \ CGO_ENABLED=0 \ TARGET_GOOS=${TARGET_GOOS} \ @@ -16,10 +16,8 @@ WORKDIR /go/src/github.com/cloudflare/cloudflared/ # copy our sources into the builder image COPY . . 
-RUN .teamcity/install-cloudflare-go.sh - # compile cloudflared -RUN PATH="/tmp/go/bin:$PATH" make cloudflared +RUN make cloudflared # use a distroless base image with glibc FROM gcr.io/distroless/base-debian12:nonroot diff --git a/Dockerfile.amd64 b/Dockerfile.amd64 index 20c48bd4..5050d170 100644 --- a/Dockerfile.amd64 +++ b/Dockerfile.amd64 @@ -1,5 +1,5 @@ # use a builder image for building cloudflare -FROM golang:1.22.10 as builder +FROM golang:1.24.2 AS builder ENV GO111MODULE=on \ CGO_ENABLED=0 \ # the CONTAINER_BUILD envvar is used set github.com/cloudflare/cloudflared/metrics.Runtime=virtual @@ -11,10 +11,8 @@ WORKDIR /go/src/github.com/cloudflare/cloudflared/ # copy our sources into the builder image COPY . . -RUN .teamcity/install-cloudflare-go.sh - # compile cloudflared -RUN GOOS=linux GOARCH=amd64 PATH="/tmp/go/bin:$PATH" make cloudflared +RUN GOOS=linux GOARCH=amd64 make cloudflared # use a distroless base image with glibc FROM gcr.io/distroless/base-debian12:nonroot diff --git a/Dockerfile.arm64 b/Dockerfile.arm64 index f4b25a6e..cd731126 100644 --- a/Dockerfile.arm64 +++ b/Dockerfile.arm64 @@ -1,5 +1,5 @@ # use a builder image for building cloudflare -FROM golang:1.22.10 as builder +FROM golang:1.24.2 AS builder ENV GO111MODULE=on \ CGO_ENABLED=0 \ # the CONTAINER_BUILD envvar is used set github.com/cloudflare/cloudflared/metrics.Runtime=virtual @@ -11,10 +11,8 @@ WORKDIR /go/src/github.com/cloudflare/cloudflared/ # copy our sources into the builder image COPY . . -RUN .teamcity/install-cloudflare-go.sh - # compile cloudflared -RUN GOOS=linux GOARCH=arm64 PATH="/tmp/go/bin:$PATH" make cloudflared +RUN GOOS=linux GOARCH=arm64 make cloudflared # use a distroless base image with glibc FROM gcr.io/distroless/base-debian12:nonroot-arm64 diff --git a/cfsetup.yaml b/cfsetup.yaml index 3c972679..cfd7885c 100644 --- a/cfsetup.yaml +++ b/cfsetup.yaml @@ -1,4 +1,4 @@ -pinned_go: &pinned_go go-boring=1.22.10-1 +pinned_go: &pinned_go go-boring=1.24.2-1 build_dir: &build_dir /cfsetup_build default-flavor: bookworm @@ -13,7 +13,7 @@ bullseye: &bullseye - rubygem-fpm - rpm - libffi-dev - - golangci-lint + - golangci-lint=1.64.8-2 pre-cache: &build_pre_cache - export GOCACHE=/cfsetup_build/.cache/go-build - go install golang.org/x/tools/cmd/goimports@v0.30.0 diff --git a/cmd/cloudflared/linux_service.go b/cmd/cloudflared/linux_service.go index 4e68af17..a950bb9f 100644 --- a/cmd/cloudflared/linux_service.go +++ b/cmd/cloudflared/linux_service.go @@ -4,6 +4,7 @@ package main import ( "fmt" + "io" "os" "github.com/rs/zerolog" @@ -15,7 +16,7 @@ import ( "github.com/cloudflare/cloudflared/logger" ) -func runApp(app *cli.App, graceShutdownC chan struct{}) { +func runApp(app *cli.App, _ chan struct{}) { app.Commands = append(app.Commands, &cli.Command{ Name: "service", Usage: "Manages the cloudflared system service", @@ -35,7 +36,7 @@ func runApp(app *cli.App, graceShutdownC chan struct{}) { }, }, }) - app.Run(os.Args) + _ = app.Run(os.Args) } // The directory and files that are used by the service. 
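Two related threads run through this change: .golangci.yaml swaps the deprecated tenv linter for its replacement, usetesting, and the test files further down migrate from context.Background() to t.Context(). As a rough illustration of the style usetesting enforces (an invented test and environment variable, not code from this patch or repository), the rewrite looks like this:

package example_test

import (
	"os"
	"path/filepath"
	"testing"
)

// TestConfigDir shows the testing-package helpers that usetesting steers
// toward: t.Setenv instead of os.Setenv, t.TempDir instead of os.MkdirTemp,
// and (since Go 1.24) t.Context instead of context.Background. All three
// are cleaned up automatically when the test ends.
func TestConfigDir(t *testing.T) {
	t.Setenv("EXAMPLE_ORIGIN_CERT", "/nonexistent/cert.pem") // env var name is illustrative

	dir := t.TempDir()
	path := filepath.Join(dir, "config.yml")
	if err := os.WriteFile(path, []byte("tunnel: example\n"), 0o600); err != nil {
		t.Fatal(err)
	}

	ctx := t.Context()
	_ = ctx // derive request contexts from ctx rather than context.Background()
}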
@@ -97,6 +98,7 @@ WantedBy=timers.target var sysvTemplate = ServiceTemplate{ Path: "/etc/init.d/cloudflared", FileMode: 0755, + // nolint: dupword Content: `#!/bin/sh # For RedHat and cousins: # chkconfig: 2345 99 01 @@ -184,13 +186,11 @@ exit 0 `, } -var ( - noUpdateServiceFlag = &cli.BoolFlag{ - Name: "no-update-service", - Usage: "Disable auto-update of the cloudflared linux service, which restarts the server to upgrade for new versions.", - Value: false, - } -) +var noUpdateServiceFlag = &cli.BoolFlag{ + Name: "no-update-service", + Usage: "Disable auto-update of the cloudflared linux service, which restarts the server to upgrade for new versions.", + Value: false, +} func isSystemd() bool { if _, err := os.Stat("/run/systemd/system"); err == nil { @@ -430,3 +430,38 @@ func uninstallSysv(log *zerolog.Logger) error { } return nil } + +func ensureConfigDirExists(configDir string) error { + ok, err := config.FileExists(configDir) + if !ok && err == nil { + err = os.Mkdir(configDir, 0755) + } + return err +} + +func copyFile(src, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + destFile, err := os.Create(dest) + if err != nil { + return err + } + ok := false + defer func() { + destFile.Close() + if !ok { + _ = os.Remove(dest) + } + }() + + if _, err := io.Copy(destFile, srcFile); err != nil { + return err + } + + ok = true + return nil +} diff --git a/cmd/cloudflared/service_template.go b/cmd/cloudflared/service_template.go index 5fb9ae12..124e75a1 100644 --- a/cmd/cloudflared/service_template.go +++ b/cmd/cloudflared/service_template.go @@ -11,8 +11,6 @@ import ( "text/template" homedir "github.com/mitchellh/go-homedir" - - "github.com/cloudflare/cloudflared/config" ) type ServiceTemplate struct { @@ -109,38 +107,3 @@ func runCommand(command string, args ...string) error { } return nil } - -func ensureConfigDirExists(configDir string) error { - ok, err := config.FileExists(configDir) - if !ok && err == nil { - err = os.Mkdir(configDir, 0755) - } - return err -} - -func copyFile(src, dest string) error { - srcFile, err := os.Open(src) - if err != nil { - return err - } - defer srcFile.Close() - - destFile, err := os.Create(dest) - if err != nil { - return err - } - ok := false - defer func() { - destFile.Close() - if !ok { - _ = os.Remove(dest) - } - }() - - if _, err := io.Copy(destFile, srcFile); err != nil { - return err - } - - ok = true - return nil -} diff --git a/cmd/cloudflared/tunnel/subcommands.go b/cmd/cloudflared/tunnel/subcommands.go index 1700b696..4be655a0 100644 --- a/cmd/cloudflared/tunnel/subcommands.go +++ b/cmd/cloudflared/tunnel/subcommands.go @@ -761,7 +761,7 @@ func runCommand(c *cli.Context) error { if tokenFile := c.String(TunnelTokenFileFlag); tokenFile != "" { data, err := os.ReadFile(tokenFile) if err != nil { - return cliutil.UsageError("Failed to read token file: " + err.Error()) + return cliutil.UsageError("Failed to read token file: %s", err.Error()) } tokenStr = strings.TrimSpace(string(data)) } diff --git a/cmd/cloudflared/windows_service.go b/cmd/cloudflared/windows_service.go index f5594553..3c90cf6f 100644 --- a/cmd/cloudflared/windows_service.go +++ b/cmd/cloudflared/windows_service.go @@ -190,7 +190,7 @@ func installWindowsService(c *cli.Context) error { log := zeroLogger.With().Str(LogFieldWindowsServiceName, windowsServiceName).Logger() if err == nil { s.Close() - return fmt.Errorf(serviceAlreadyExistsWarn(windowsServiceName)) + return 
errors.New(serviceAlreadyExistsWarn(windowsServiceName)) } extraArgs, err := getServiceExtraArgsFromCliArgs(c, &log) if err != nil { @@ -238,7 +238,7 @@ func uninstallWindowsService(c *cli.Context) error { defer m.Disconnect() s, err := m.OpenService(windowsServiceName) if err != nil { - return fmt.Errorf("Agent service %s is not installed, so it could not be uninstalled", windowsServiceName) + return fmt.Errorf("agent service %s is not installed, so it could not be uninstalled", windowsServiceName) } defer s.Close() diff --git a/connection/http2_test.go b/connection/http2_test.go index a1b01563..5e99ca8b 100644 --- a/connection/http2_test.go +++ b/connection/http2_test.go @@ -27,13 +27,11 @@ import ( "github.com/cloudflare/cloudflared/tunnelrpc/pogs" ) -var ( - testTransport = http2.Transport{} -) +var testTransport = http2.Transport{} func newTestHTTP2Connection() (*HTTP2Connection, net.Conn) { edgeConn, cfdConn := net.Pipe() - var connIndex = uint8(0) + connIndex := uint8(0) log := zerolog.Nop() obs := NewObserver(&log, &log) controlStream := NewControlStream( @@ -63,7 +61,7 @@ func newTestHTTP2Connection() (*HTTP2Connection, net.Conn) { func TestHTTP2ConfigurationSet(t *testing.T) { http2Conn, edgeConn := newTestHTTP2Connection() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) var wg sync.WaitGroup wg.Add(1) go func() { @@ -131,7 +129,7 @@ func TestServeHTTP(t *testing.T) { http2Conn, edgeConn := newTestHTTP2Connection() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) var wg sync.WaitGroup wg.Add(1) go func() { @@ -262,7 +260,7 @@ func (w *wsRespWriter) close() { func TestServeWS(t *testing.T) { http2Conn, _ := newTestHTTP2Connection() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) respWriter := newWSRespWriter() readPipe, writePipe := io.Pipe() @@ -302,7 +300,7 @@ func TestServeWS(t *testing.T) { func TestNoWriteAfterServeHTTPReturns(t *testing.T) { cfdHTTP2Conn, edgeTCPConn := newTestHTTP2Connection() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) var wg sync.WaitGroup serverDone := make(chan struct{}) @@ -380,7 +378,7 @@ func TestServeControlStream(t *testing.T) { ) http2Conn.controlStreamHandler = controlStream - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) var wg sync.WaitGroup wg.Add(1) go func() { @@ -434,7 +432,7 @@ func TestFailRegistration(t *testing.T) { ) http2Conn.controlStreamHandler = controlStream - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) var wg sync.WaitGroup wg.Add(1) go func() { @@ -485,7 +483,7 @@ func TestGracefulShutdownHTTP2(t *testing.T) { http2Conn.controlStreamHandler = controlStream - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) var wg sync.WaitGroup wg.Add(1) go func() { @@ -535,7 +533,7 @@ func TestGracefulShutdownHTTP2(t *testing.T) { } func TestServeTCP_RateLimited(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) http2Conn, edgeConn := newTestHTTP2Connection() var wg sync.WaitGroup @@ -567,7 +565,7 @@ func TestServeTCP_RateLimited(t *testing.T) { func benchmarkServeHTTP(b *testing.B, test testRequest) { http2Conn, edgeConn := newTestHTTP2Connection() - ctx, cancel := 
context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(b.Context()) var wg sync.WaitGroup wg.Add(1) go func() { diff --git a/connection/quic_connection_test.go b/connection/quic_connection_test.go index 8027447f..251f8630 100644 --- a/connection/quic_connection_test.go +++ b/connection/quic_connection_test.go @@ -60,7 +60,7 @@ func TestQUICServer(t *testing.T) { err := wsutil.WriteClientBinary(wsBuf, []byte("Hello")) require.NoError(t, err) - var tests = []struct { + tests := []struct { desc string dest string connectionType pogs.ConnectionType @@ -150,7 +150,7 @@ func TestQUICServer(t *testing.T) { for i, test := range tests { test := test // capture range variable t.Run(test.desc, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) // Start a UDP Listener for QUIC. udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") require.NoError(t, err) @@ -194,6 +194,7 @@ func (fakeControlStream) ServeControlStream(ctx context.Context, rw io.ReadWrite <-ctx.Done() return nil } + func (fakeControlStream) IsStopped() bool { return true } @@ -211,7 +212,7 @@ func quicServer( session, err := listener.Accept(ctx) require.NoError(t, err) - quicStream, err := session.OpenStreamSync(context.Background()) + quicStream, err := session.OpenStreamSync(t.Context()) require.NoError(t, err) stream := cfdquic.NewSafeStreamCloser(quicStream, defaultQUICTimeout, &log) @@ -278,7 +279,7 @@ func (moc *mockOriginProxyWithRequest) ProxyHTTP(w ResponseWriter, tr *tracing.T } func TestBuildHTTPRequest(t *testing.T) { - var tests = []struct { + tests := []struct { name string connectRequest *pogs.ConnectRequest body io.ReadCloser @@ -499,7 +500,7 @@ func TestBuildHTTPRequest(t *testing.T) { for _, test := range tests { test := test // capture range variable t.Run(test.name, func(t *testing.T) { - req, err := buildHTTPRequest(context.Background(), test.connectRequest, test.body, 0, &log) + req, err := buildHTTPRequest(t.Context(), test.connectRequest, test.body, 0, &log) require.NoError(t, err) test.req = test.req.WithContext(req.Context()) require.Equal(t, test.req, req.Request) @@ -525,7 +526,7 @@ func TestServeUDPSession(t *testing.T) { require.NoError(t, err) defer udpListener.Close() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) // Establish QUIC connection with edge edgeQUICSessionChan := make(chan quic.Connection) @@ -606,7 +607,7 @@ func TestCreateUDPConnReuseSourcePort(t *testing.T) { // TestTCPProxy_FlowRateLimited tests if the pogs.ConnectResponse returns the expected error and metadata, when a // new flow is rate limited. func TestTCPProxy_FlowRateLimited(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) // Start a UDP Listener for QUIC. 
udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") @@ -627,7 +628,7 @@ func TestTCPProxy_FlowRateLimited(t *testing.T) { session, err := quicListener.Accept(ctx) assert.NoError(t, err) - quicStream, err := session.OpenStreamSync(context.Background()) + quicStream, err := session.OpenStreamSync(t.Context()) assert.NoError(t, err) stream := cfdquic.NewSafeStreamCloser(quicStream, defaultQUICTimeout, &log) @@ -688,9 +689,7 @@ func testCreateUDPConnReuseSourcePortForEdgeIP(t *testing.T, edgeIP netip.AddrPo } func serveSession(ctx context.Context, datagramConn *datagramV2Connection, edgeQUICSession quic.Connection, closeType closeReason, expectedReason string, t *testing.T) { - var ( - payload = []byte(t.Name()) - ) + payload := []byte(t.Name()) sessionID := uuid.New() cfdConn, originConn := net.Pipe() // Registers and run a new session @@ -803,7 +802,7 @@ func testTunnelConnection(t *testing.T, serverAddr netip.AddrPort, index uint8) } // Start a mock httpProxy log := zerolog.New(io.Discard) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Dial the QUIC connection to the edge diff --git a/connection/quic_datagram_v2_test.go b/connection/quic_datagram_v2_test.go index 737a2e9b..af58ffdb 100644 --- a/connection/quic_datagram_v2_test.go +++ b/connection/quic_datagram_v2_test.go @@ -16,8 +16,7 @@ import ( "github.com/cloudflare/cloudflared/mocks" ) -type mockQuicConnection struct { -} +type mockQuicConnection struct{} func (m *mockQuicConnection) AcceptStream(_ context.Context) (quic.Stream, error) { return nil, nil @@ -71,6 +70,10 @@ func (m *mockQuicConnection) ReceiveDatagram(_ context.Context) ([]byte, error) return nil, nil } +func (m *mockQuicConnection) AddPath(*quic.Transport) (*quic.Path, error) { + return nil, nil +} + func TestRateLimitOnNewDatagramV2UDPSession(t *testing.T) { log := zerolog.Nop() conn := &mockQuicConnection{} @@ -78,7 +81,7 @@ func TestRateLimitOnNewDatagramV2UDPSession(t *testing.T) { flowLimiterMock := mocks.NewMockLimiter(ctrl) datagramConn := NewDatagramV2Connection( - context.Background(), + t.Context(), conn, nil, 0, @@ -91,6 +94,6 @@ func TestRateLimitOnNewDatagramV2UDPSession(t *testing.T) { flowLimiterMock.EXPECT().Acquire("udp").Return(cfdflow.ErrTooManyActiveFlows) flowLimiterMock.EXPECT().Release().Times(0) - _, err := datagramConn.RegisterUdpSession(context.Background(), uuid.New(), net.IPv4(0, 0, 0, 0), 1000, 1*time.Second, "") + _, err := datagramConn.RegisterUdpSession(t.Context(), uuid.New(), net.IPv4(0, 0, 0, 0), 1000, 1*time.Second, "") require.ErrorIs(t, err, cfdflow.ErrTooManyActiveFlows) } diff --git a/datagramsession/session_test.go b/datagramsession/session_test.go index b8d5308b..74113644 100644 --- a/datagramsession/session_test.go +++ b/datagramsession/session_test.go @@ -35,12 +35,10 @@ func TestCloseIdle(t *testing.T) { } func testSessionReturns(t *testing.T, closeBy closeMethod, closeAfterIdle time.Duration) { - var ( - localCloseReason = &errClosedSession{ - message: "connection closed by origin", - byRemote: false, - } - ) + localCloseReason := &errClosedSession{ + message: "connection closed by origin", + byRemote: false, + } sessionID := uuid.New() cfdConn, originConn := net.Pipe() payload := testPayload(sessionID) @@ -49,7 +47,7 @@ func testSessionReturns(t *testing.T, closeBy closeMethod, closeAfterIdle time.D mg := NewManager(&log, nil, nil) session := mg.newSession(sessionID, cfdConn) - ctx, cancel := context.WithCancel(context.Background()) + ctx, 
cancel := context.WithCancel(t.Context()) sessionDone := make(chan struct{}) go func() { closedByRemote, err := session.Serve(ctx, closeAfterIdle) @@ -128,7 +126,7 @@ func testActiveSessionNotClosed(t *testing.T, readFromDst bool, writeToDst bool) startTime := time.Now() activeUntil := startTime.Add(activeTime) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) errGroup, ctx := errgroup.WithContext(ctx) errGroup.Go(func() error { _, _ = session.Serve(ctx, closeAfterIdle) @@ -211,7 +209,7 @@ func TestZeroBytePayload(t *testing.T) { mg := NewManager(&nopLogger, sender.muxSession, nil) session := mg.newSession(sessionID, cfdConn) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) errGroup, ctx := errgroup.WithContext(ctx) errGroup.Go(func() error { // Read from underlying conn and send to transport diff --git a/dev.Dockerfile b/dev.Dockerfile deleted file mode 100644 index 8d15784f..00000000 --- a/dev.Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM golang:1.22.10 as builder -ENV GO111MODULE=on \ - CGO_ENABLED=0 -WORKDIR /go/src/github.com/cloudflare/cloudflared/ -RUN apt-get update -COPY . . -RUN .teamcity/install-cloudflare-go.sh -# compile cloudflared -RUN PATH="/tmp/go/bin:$PATH" make cloudflared -RUN cp /go/src/github.com/cloudflare/cloudflared/cloudflared /usr/local/bin/ diff --git a/features/selector_test.go b/features/selector_test.go index 96284d78..511aebe6 100644 --- a/features/selector_test.go +++ b/features/selector_test.go @@ -77,7 +77,7 @@ func TestFeaturePrecedenceEvaluationPostQuantum(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { resolver := &staticResolver{record: featuresRecord{}} - selector, err := newFeatureSelector(context.Background(), test.name, &logger, resolver, []string{}, test.cli, time.Second) + selector, err := newFeatureSelector(t.Context(), test.name, &logger, resolver, []string{}, test.cli, time.Second) require.NoError(t, err) snapshot := selector.Snapshot() require.ElementsMatch(t, test.expectedFeatures, snapshot.FeaturesList) @@ -121,7 +121,7 @@ func TestFeaturePrecedenceEvaluationDatagramVersion(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { resolver := &staticResolver{record: test.remote} - selector, err := newFeatureSelector(context.Background(), test.name, &logger, resolver, test.cli, false, time.Second) + selector, err := newFeatureSelector(t.Context(), test.name, &logger, resolver, test.cli, false, time.Second) require.NoError(t, err) snapshot := selector.Snapshot() require.ElementsMatch(t, test.expectedFeatures, snapshot.FeaturesList) @@ -155,7 +155,7 @@ func TestDeprecatedFeaturesRemoved(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { resolver := &staticResolver{record: test.remote} - selector, err := newFeatureSelector(context.Background(), test.name, &logger, resolver, test.cli, false, time.Second) + selector, err := newFeatureSelector(t.Context(), test.name, &logger, resolver, test.cli, false, time.Second) require.NoError(t, err) snapshot := selector.Snapshot() require.ElementsMatch(t, test.expectedFeatures, snapshot.FeaturesList) @@ -180,7 +180,7 @@ func TestRefreshFeaturesRecord(t *testing.T) { } // Manually progress the next refresh - _ = selector.refresh(context.Background()) + _ = selector.refresh(t.Context()) } // Make sure a resolver error doesn't override the last fetched features @@ -197,7 +197,7 @@ func 
TestSnapshotIsolation(t *testing.T) { require.Equal(t, DatagramV2, snapshot.DatagramVersion) // Manually progress the next refresh - _ = selector.refresh(context.Background()) + _ = selector.refresh(t.Context()) snapshot2 := selector.Snapshot() require.Equal(t, DatagramV3, snapshot2.DatagramVersion) @@ -224,7 +224,7 @@ func newTestSelector(t *testing.T, percentages []uint32, pq bool, refreshFreq ti percentages: percentages, } - selector, err := newFeatureSelector(context.Background(), testAccountTag, &logger, resolver, []string{}, pq, refreshFreq) + selector, err := newFeatureSelector(t.Context(), testAccountTag, &logger, resolver, []string{}, pq, refreshFreq) require.NoError(t, err) return selector diff --git a/go.mod b/go.mod index a5a24f1a..02204804 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/cloudflare/cloudflared -go 1.22 +go 1.24 require ( github.com/coredns/coredns v1.11.3 @@ -12,7 +12,7 @@ require ( github.com/getsentry/sentry-go v0.16.0 github.com/go-chi/chi/v5 v5.0.8 github.com/go-chi/cors v1.2.1 - github.com/go-jose/go-jose/v4 v4.0.1 + github.com/go-jose/go-jose/v4 v4.1.0 github.com/gobwas/ws v1.2.1 github.com/google/gopacket v1.1.19 github.com/google/uuid v1.6.0 @@ -24,9 +24,9 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.0 - github.com/quic-go/quic-go v0.45.0 + github.com/quic-go/quic-go v0.51.0 github.com/rs/zerolog v1.20.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/urfave/cli/v2 v2.3.0 go.opentelemetry.io/contrib/propagators v0.22.0 go.opentelemetry.io/otel v1.26.0 @@ -34,14 +34,14 @@ require ( go.opentelemetry.io/otel/sdk v1.26.0 go.opentelemetry.io/otel/trace v1.26.0 go.opentelemetry.io/proto/otlp v1.2.0 - go.uber.org/automaxprocs v1.4.0 - go.uber.org/mock v0.5.0 - golang.org/x/crypto v0.31.0 - golang.org/x/net v0.26.0 - golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 - golang.org/x/term v0.27.0 - google.golang.org/protobuf v1.34.1 + go.uber.org/automaxprocs v1.6.0 + go.uber.org/mock v0.5.1 + golang.org/x/crypto v0.37.0 + golang.org/x/net v0.39.0 + golang.org/x/sync v0.13.0 + golang.org/x/sys v0.32.0 + golang.org/x/term v0.31.0 + google.golang.org/protobuf v1.36.5 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 nhooyr.io/websocket v1.8.7 @@ -61,35 +61,32 @@ require ( github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect + github.com/google/pprof v0.0.0-20250418163039-24c5476c6587 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect github.com/klauspost/compress v1.15.11 // indirect - github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 
// indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/onsi/ginkgo/v2 v2.13.0 // indirect + github.com/onsi/ginkgo/v2 v2.23.4 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.53.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect go.opentelemetry.io/otel/metric v1.26.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/tools v0.22.0 // indirect - google.golang.org/appengine v1.6.8 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/tools v0.32.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect google.golang.org/grpc v1.63.2 // indirect @@ -104,4 +101,4 @@ replace github.com/prometheus/golang_client => github.com/prometheus/golang_clie replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 // This fork is based on quic-go v0.45 -replace github.com/quic-go/quic-go => github.com/chungthuang/quic-go v0.45.1-0.20250128102735-2687bd175910 +replace github.com/quic-go/quic-go => github.com/chungthuang/quic-go v0.45.1-0.20250428085412-43229ad201fd diff --git a/go.sum b/go.sum index cb175989..920a766e 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chungthuang/quic-go v0.45.1-0.20250128102735-2687bd175910 h1:/hTvBpxBDj/3NIzTodi1oEOyNBpirvgDSPKSV7VqAZU= -github.com/chungthuang/quic-go v0.45.1-0.20250128102735-2687bd175910/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= +github.com/chungthuang/quic-go v0.45.1-0.20250428085412-43229ad201fd h1:VdYI5zFQ2h1/qzoC6rhyPx479bkF8i177Qpg4Q2n1vk= +github.com/chungthuang/quic-go v0.45.1-0.20250428085412-43229ad201fd/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ= github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/coredns v1.11.3 h1:8RjnpZc42db5th84/QJKH2i137ecJdzZK1HJwhetSPk= @@ -21,7 +21,6 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -55,11 +54,11 @@ github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= -github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY= +github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -72,8 +71,8 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= @@ -89,20 +88,17 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20250418163039-24c5476c6587 h1:b/8HpQhvKLSNzH5oTXN2WkNcMl6YB5K3FRbb+i+Ml34= +github.com/google/pprof v0.0.0-20250418163039-24c5476c6587/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -121,11 +117,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -150,10 +143,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod 
h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= @@ -167,6 +160,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= @@ -187,10 +182,9 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= @@ -198,7 +192,6 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8= go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU= go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM= @@ -215,85 +208,60 @@ go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2L go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= -go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0= -go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= -go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= -go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= 
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/mock v0.5.1 h1:ASgazW/qBmR+A32MYFDB6E2POoTgOwT509VP0CT/fjs= +go.uber.org/mock v0.5.1/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools 
v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ= google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= diff --git a/ingress/icmp_windows_test.go b/ingress/icmp_windows_test.go index fa102442..5cd53a30 100644 --- a/ingress/icmp_windows_test.go +++ b/ingress/icmp_windows_test.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && cgo package ingress diff --git a/ingress/ingress.go b/ingress/ingress.go index 60ee87ac..eaad7dce 100644 --- a/ingress/ingress.go +++ b/ingress/ingress.go @@ -113,7 +113,7 @@ func ParseIngressFromConfigAndCLI(conf *config.Configuration, c *cli.Context, lo // If no token is provided, the probability of NOT being a remotely managed tunnel is higher. // So, we should warn the user that no ingress rules were found, because remote configuration will most likely not exist. if !c.IsSet("token") { - log.Warn().Msgf(ErrNoIngressRulesCLI.Error()) + log.Warn().Msg(ErrNoIngressRulesCLI.Error()) } return newDefaultOrigin(c, log), nil } @@ -378,17 +378,17 @@ func validateHostname(r config.UnvalidatedIngressRule, ruleIndex, totalRules int } // ONLY the last rule should catch all hostnames. 
if !isLastRule && isCatchAllRule { - return errRuleShouldNotBeCatchAll{index: ruleIndex, hostname: r.Hostname} + return ruleShouldNotBeCatchAllError{index: ruleIndex, hostname: r.Hostname} } return nil } -type errRuleShouldNotBeCatchAll struct { +type ruleShouldNotBeCatchAllError struct { index int hostname string } -func (e errRuleShouldNotBeCatchAll) Error() string { +func (e ruleShouldNotBeCatchAllError) Error() string { return fmt.Sprintf("Rule #%d is matching the hostname '%s', but "+ "this will match every hostname, meaning the rules which follow it "+ "will never be triggered.", e.index+1, e.hostname) diff --git a/ingress/origin_proxy.go b/ingress/origin_proxy.go index 1caf28f0..7371eac9 100644 --- a/ingress/origin_proxy.go +++ b/ingress/origin_proxy.go @@ -66,7 +66,7 @@ func (o *httpService) SetOriginServerName(req *http.Request) { } return tls.Client(conn, &tls.Config{ RootCAs: o.transport.TLSClientConfig.RootCAs, - InsecureSkipVerify: o.transport.TLSClientConfig.InsecureSkipVerify, + InsecureSkipVerify: o.transport.TLSClientConfig.InsecureSkipVerify, // nolint: gosec ServerName: req.Host, }), nil } @@ -74,7 +74,7 @@ func (o *httpService) SetOriginServerName(req *http.Request) { func (o *statusCode) RoundTrip(_ *http.Request) (*http.Response, error) { if o.defaultResp { - o.log.Warn().Msgf(ErrNoIngressRulesCLI.Error()) + o.log.Warn().Msg(ErrNoIngressRulesCLI.Error()) } resp := &http.Response{ StatusCode: o.code, @@ -114,7 +114,6 @@ func (o *tcpOverWSService) EstablishConnection(ctx context.Context, dest string, streamHandler: o.streamHandler, } return originConn, nil - } func (o *socksProxyOverWSService) EstablishConnection(_ context.Context, _ string, _ *zerolog.Logger) (OriginConnection, error) { diff --git a/orchestration/orchestrator_test.go b/orchestration/orchestrator_test.go index a6e0755b..aeed4860 100644 --- a/orchestration/orchestrator_test.go +++ b/orchestration/orchestrator_test.go @@ -53,7 +53,7 @@ func TestUpdateConfiguration(t *testing.T) { initConfig := &Config{ Ingress: &ingress.Ingress{}, } - orchestrator, err := NewOrchestrator(context.Background(), initConfig, testTags, []ingress.Rule{ingress.NewManagementRule(management.New("management.argotunnel.com", false, "1.1.1.1:80", uuid.Nil, "", &testLogger, nil))}, &testLogger) + orchestrator, err := NewOrchestrator(t.Context(), initConfig, testTags, []ingress.Rule{ingress.NewManagementRule(management.New("management.argotunnel.com", false, "1.1.1.1:80", uuid.Nil, "", &testLogger, nil))}, &testLogger) require.NoError(t, err) initOriginProxy, err := orchestrator.GetOriginProxy() require.NoError(t, err) @@ -182,7 +182,7 @@ func TestUpdateConfiguration_FromMigration(t *testing.T) { initConfig := &Config{ Ingress: &ingress.Ingress{}, } - orchestrator, err := NewOrchestrator(context.Background(), initConfig, testTags, []ingress.Rule{}, &testLogger) + orchestrator, err := NewOrchestrator(t.Context(), initConfig, testTags, []ingress.Rule{}, &testLogger) require.NoError(t, err) initOriginProxy, err := orchestrator.GetOriginProxy() require.NoError(t, err) @@ -208,7 +208,7 @@ func TestUpdateConfiguration_WithoutIngressRule(t *testing.T) { initConfig := &Config{ Ingress: &ingress.Ingress{}, } - orchestrator, err := NewOrchestrator(context.Background(), initConfig, testTags, []ingress.Rule{}, &testLogger) + orchestrator, err := NewOrchestrator(t.Context(), initConfig, testTags, []ingress.Rule{}, &testLogger) require.NoError(t, err) initOriginProxy, err := orchestrator.GetOriginProxy() require.NoError(t, err) @@ -300,7 +300,7 @@ 
func TestConcurrentUpdateAndRead(t *testing.T) { } ) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() orchestrator, err := NewOrchestrator(ctx, initConfig, testTags, []ingress.Rule{}, &testLogger) @@ -393,7 +393,7 @@ func TestConcurrentUpdateAndRead(t *testing.T) { // TestOverrideWarpRoutingConfigWithLocalValues tests that if a value is defined in the Config.ConfigurationFlags, // it will override the value that comes from the remote result. func TestOverrideWarpRoutingConfigWithLocalValues(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() assertMaxActiveFlows := func(orchestrator *Orchestrator, expectedValue uint64) { @@ -580,7 +580,7 @@ func TestClosePreviousProxies(t *testing.T) { } ) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) orchestrator, err := NewOrchestrator(ctx, initConfig, testTags, []ingress.Rule{}, &testLogger) require.NoError(t, err) @@ -641,7 +641,7 @@ func TestPersistentConnection(t *testing.T) { initConfig := &Config{ Ingress: &ingress.Ingress{}, } - orchestrator, err := NewOrchestrator(context.Background(), initConfig, testTags, []ingress.Rule{}, &testLogger) + orchestrator, err := NewOrchestrator(t.Context(), initConfig, testTags, []ingress.Rule{}, &testLogger) require.NoError(t, err) wsOrigin := httptest.NewServer(http.HandlerFunc(wsEcho)) @@ -674,7 +674,7 @@ func TestPersistentConnection(t *testing.T) { tcpReqReader, tcpReqWriter := io.Pipe() tcpRespReadWriter := newRespReadWriteFlusher() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() var wg sync.WaitGroup diff --git a/proxy/proxy_test.go b/proxy/proxy_test.go index cfbb3926..56c9cab9 100644 --- a/proxy/proxy_test.go +++ b/proxy/proxy_test.go @@ -148,7 +148,7 @@ func (w *mockSSERespWriter) ReadBytes() []byte { func TestProxySingleOrigin(t *testing.T) { log := zerolog.Nop() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) flagSet := flag.NewFlagSet(t.Name(), flag.PanicOnError) flagSet.Bool("hello-world", true, "") @@ -190,7 +190,7 @@ func testProxyWebsocket(proxy connection.OriginProxy) func(t *testing.T) { return func(t *testing.T) { // WSRoute is a websocket echo handler const testTimeout = 5 * time.Second * 1000 - ctx, cancel := context.WithTimeout(context.Background(), testTimeout) + ctx, cancel := context.WithTimeout(t.Context(), testTimeout) defer cancel() readPipe, writePipe := io.Pipe() req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://localhost:8080%s", hello.WSRoute), readPipe) @@ -257,7 +257,7 @@ func testProxySSE(proxy connection.OriginProxy) func(t *testing.T) { pushFreq = time.Millisecond * 10 ) responseWriter := newMockSSERespWriter() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://localhost:8080%s?freq=%s", hello.SSERoute, pushFreq), nil) require.NoError(t, err) @@ -365,7 +365,7 @@ func runIngressTestScenarios(t *testing.T, unvalidatedIngress []config.Unvalidat log := zerolog.Nop() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) require.NoError(t, ingress.StartOrigins(&log, ctx.Done())) proxy := NewOriginProxy(ingress, 
noWarpRouting, testTags, cfdflow.NewLimiter(0), time.Duration(0), &log) @@ -494,7 +494,7 @@ func TestConnections(t *testing.T) { err bool } - var tests = []struct { + tests := []struct { name string args args want want @@ -686,7 +686,7 @@ func TestConnections(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) ln, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) // Starts origin service @@ -755,6 +755,7 @@ func newWSRequestBody(data []byte) *requestBody { pw: pw, } } + func newTCPRequestBody(data []byte) *requestBody { pr, pw := io.Pipe() go func() { @@ -973,12 +974,12 @@ func runEchoTCPService(t *testing.T, l net.Listener) { } func runEchoWSService(t *testing.T, l net.Listener) { - var upgrader = gorillaWS.Upgrader{ + upgrader := gorillaWS.Upgrader{ ReadBufferSize: 10, WriteBufferSize: 10, } - var ws = func(w http.ResponseWriter, r *http.Request) { + ws := func(w http.ResponseWriter, r *http.Request) { header := make(http.Header) for k, vs := range r.Header { if k == "Test-Cloudflared-Echo" { diff --git a/quic/v3/muxer_test.go b/quic/v3/muxer_test.go index 9fd09ca0..555489f5 100644 --- a/quic/v3/muxer_test.go +++ b/quic/v3/muxer_test.go @@ -136,7 +136,7 @@ func TestDatagramConnServe_ApplicationClosed(t *testing.T) { quic := newMockQuicConn() conn := v3.NewDatagramConn(quic, v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort, cfdflow.NewLimiter(0)), &noopICMPRouter{}, 0, &noopMetrics{}, &log) - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() err := conn.Serve(ctx) if !errors.Is(err, context.DeadlineExceeded) { @@ -147,12 +147,12 @@ func TestDatagramConnServe_ApplicationClosed(t *testing.T) { func TestDatagramConnServe_ConnectionClosed(t *testing.T) { log := zerolog.Nop() quic := newMockQuicConn() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() quic.ctx = ctx conn := v3.NewDatagramConn(quic, v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort, cfdflow.NewLimiter(0)), &noopICMPRouter{}, 0, &noopMetrics{}, &log) - err := conn.Serve(context.Background()) + err := conn.Serve(t.Context()) if !errors.Is(err, context.DeadlineExceeded) { t.Fatal(err) } @@ -163,7 +163,7 @@ func TestDatagramConnServe_ReceiveDatagramError(t *testing.T) { quic := &mockQuicConnReadError{err: net.ErrClosed} conn := v3.NewDatagramConn(quic, v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort, cfdflow.NewLimiter(0)), &noopICMPRouter{}, 0, &noopMetrics{}, &log) - err := conn.Serve(context.Background()) + err := conn.Serve(t.Context()) if !errors.Is(err, net.ErrClosed) { t.Fatal(err) } @@ -178,7 +178,7 @@ func TestDatagramConnServe_SessionRegistrationRateLimit(t *testing.T) { conn := v3.NewDatagramConn(quic, sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() done := make(chan error, 1) go func() { @@ -230,7 +230,7 @@ func TestDatagramConnServe_ErrorDatagramTypes(t *testing.T) { quic.send <- test.input conn := v3.NewDatagramConn(quic, &mockSessionManager{}, &noopICMPRouter{}, 0, &noopMetrics{}, &log) - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + 
ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() err := conn.Serve(ctx) // we cancel the Serve method to check to see if the log output was written since the unsupported datagram @@ -272,7 +272,7 @@ func TestDatagramConnServe_RegisterSession_SessionManagerError(t *testing.T) { conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) go func() { @@ -307,7 +307,7 @@ func TestDatagramConnServe(t *testing.T) { conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) go func() { @@ -356,7 +356,7 @@ func TestDatagramConnServeDecodeMultipleICMPInParallel(t *testing.T) { conn := v3.NewDatagramConn(quic, &sessionManager, router, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) go func() { @@ -435,7 +435,7 @@ func TestDatagramConnServe_RegisterTwice(t *testing.T) { conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) go func() { @@ -499,14 +499,14 @@ func TestDatagramConnServe_MigrateConnection(t *testing.T) { conn2 := v3.NewDatagramConn(quic2, &sessionManager, &noopICMPRouter{}, 1, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) go func() { done <- conn.Serve(ctx) }() - ctx2, cancel2 := context.WithCancelCause(context.Background()) + ctx2, cancel2 := context.WithCancelCause(t.Context()) defer cancel2(errors.New("other error")) done2 := make(chan error, 1) go func() { @@ -580,7 +580,7 @@ func TestDatagramConnServe_Payload_GetSessionError(t *testing.T) { conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) go func() { @@ -608,7 +608,7 @@ func TestDatagramConnServe_Payload(t *testing.T) { conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) go func() { @@ -637,7 +637,7 @@ func TestDatagramConnServe_ICMPDatagram_TTLDecremented(t *testing.T) { conn := v3.NewDatagramConn(quic, &mockSessionManager{}, router, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) 
go func() { @@ -683,7 +683,7 @@ func TestDatagramConnServe_ICMPDatagram_TTLExceeded(t *testing.T) { conn := v3.NewDatagramConn(quic, &mockSessionManager{}, router, 0, &noopMetrics{}, &log) // Setup the muxer - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(errors.New("other error")) done := make(chan error, 1) go func() { diff --git a/quic/v3/session_test.go b/quic/v3/session_test.go index a43a811f..ce058ee4 100644 --- a/quic/v3/session_test.go +++ b/quic/v3/session_test.go @@ -93,7 +93,7 @@ func testSessionServe_Origin(t *testing.T, payload []byte) { session := v3.NewSession(testRequestID, 3*time.Second, origin, testOriginAddr, testLocalAddr, &eyeball, &noopMetrics{}, &log) defer session.Close() - ctx, cancel := context.WithCancelCause(context.Background()) + ctx, cancel := context.WithCancelCause(t.Context()) defer cancel(context.Canceled) done := make(chan error) go func() { @@ -143,7 +143,7 @@ func TestSessionServe_OriginTooLarge(t *testing.T) { done := make(chan error) go func() { - done <- session.Serve(context.Background()) + done <- session.Serve(t.Context()) }() // Attempt to write a payload too large from the origin @@ -173,7 +173,7 @@ func TestSessionServe_Migrate(t *testing.T) { defer session.Close() done := make(chan error) - eyeball1Ctx, cancel := context.WithCancelCause(context.Background()) + eyeball1Ctx, cancel := context.WithCancelCause(t.Context()) go func() { done <- session.Serve(eyeball1Ctx) }() @@ -181,7 +181,7 @@ func TestSessionServe_Migrate(t *testing.T) { // Migrate the session to a new connection before origin sends data eyeball2 := newMockEyeball() eyeball2.connID = 1 - eyeball2Ctx := context.Background() + eyeball2Ctx := t.Context() session.Migrate(&eyeball2, eyeball2Ctx, &log) // Cancel the origin eyeball context; this should not cancel the session @@ -230,7 +230,7 @@ func TestSessionServe_Migrate_CloseContext2(t *testing.T) { defer session.Close() done := make(chan error) - eyeball1Ctx, cancel := context.WithCancelCause(context.Background()) + eyeball1Ctx, cancel := context.WithCancelCause(t.Context()) go func() { done <- session.Serve(eyeball1Ctx) }() @@ -238,7 +238,7 @@ func TestSessionServe_Migrate_CloseContext2(t *testing.T) { // Migrate the session to a new connection before origin sends data eyeball2 := newMockEyeball() eyeball2.connID = 1 - eyeball2Ctx, cancel2 := context.WithCancelCause(context.Background()) + eyeball2Ctx, cancel2 := context.WithCancelCause(t.Context()) session.Migrate(&eyeball2, eyeball2Ctx, &log) // Cancel the origin eyeball context; this should not cancel the session @@ -316,7 +316,7 @@ func TestSessionServe_IdleTimeout(t *testing.T) { defer server.Close() closeAfterIdle := 2 * time.Second session := v3.NewSession(testRequestID, closeAfterIdle, origin, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) - err := session.Serve(context.Background()) + err := session.Serve(t.Context()) if !errors.Is(err, v3.SessionIdleErr{}) { t.Fatal(err) } @@ -342,7 +342,7 @@ func TestSessionServe_ParentContextCanceled(t *testing.T) { closeAfterIdle := 10 * time.Second session := v3.NewSession(testRequestID, closeAfterIdle, origin, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 2*time.Second) defer cancel() err := session.Serve(ctx) if !errors.Is(err, context.DeadlineExceeded) { @@ -366,7 +366,7 @@ func 
TestSessionServe_ReadErrors(t *testing.T) { log := zerolog.Nop() origin := newTestErrOrigin(net.ErrClosed, nil) session := v3.NewSession(testRequestID, 30*time.Second, &origin, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) - err := session.Serve(context.Background()) + err := session.Serve(t.Context()) if !errors.Is(err, net.ErrClosed) { t.Fatal(err) } diff --git a/tunnelrpc/pogs/configuration_manager.go b/tunnelrpc/pogs/configuration_manager.go index 4daae802..e7ff0588 100644 --- a/tunnelrpc/pogs/configuration_manager.go +++ b/tunnelrpc/pogs/configuration_manager.go @@ -96,7 +96,7 @@ func (p *UpdateConfigurationResponse) Unmarshal(s proto.UpdateConfigurationRespo return err } if respErr != "" { - p.Err = fmt.Errorf(respErr) + p.Err = fmt.Errorf("%s", respErr) } return nil } diff --git a/tunnelrpc/pogs/session_manager.go b/tunnelrpc/pogs/session_manager.go index b5552983..8b45f753 100644 --- a/tunnelrpc/pogs/session_manager.go +++ b/tunnelrpc/pogs/session_manager.go @@ -127,7 +127,7 @@ func (p *RegisterUdpSessionResponse) Unmarshal(s proto.RegisterUdpSessionRespons return err } if respErr != "" { - p.Err = fmt.Errorf(respErr) + p.Err = fmt.Errorf("%s", respErr) } p.Spans, err = s.Spans() if err != nil { @@ -164,7 +164,7 @@ func (c SessionManager_PogsClient) RegisterUdpSession(ctx context.Context, sessi } p.SetDstPort(dstPort) p.SetCloseAfterIdleHint(int64(closeAfterIdleHint)) - p.SetTraceContext(traceContext) + _ = p.SetTraceContext(traceContext) return nil }) result, err := promise.Result().Struct() diff --git a/tunnelrpc/quic/request_server_stream_test.go b/tunnelrpc/quic/request_server_stream_test.go index e6972263..987b54df 100644 --- a/tunnelrpc/quic/request_server_stream_test.go +++ b/tunnelrpc/quic/request_server_stream_test.go @@ -22,7 +22,7 @@ const ( ) func TestConnectRequestData(t *testing.T) { - var tests = []struct { + tests := []struct { name string hostname string connectionType pogs.ConnectionType @@ -62,7 +62,7 @@ func TestConnectRequestData(t *testing.T) { } func TestConnectResponseMeta(t *testing.T) { - var tests = []struct { + tests := []struct { name string err error metadata []pogs.Metadata @@ -106,7 +106,7 @@ func TestConnectResponseMeta(t *testing.T) { func TestRegisterUdpSession(t *testing.T) { unregisterMessage := "closed by eyeball" - var tests = []struct { + tests := []struct { name string sessionRPCServer mockSessionRPCServer }{ @@ -140,29 +140,29 @@ func TestRegisterUdpSession(t *testing.T) { sessionRegisteredChan := make(chan struct{}) go func() { ss := NewCloudflaredServer(nil, test.sessionRPCServer, nil, 10*time.Second) - err := ss.Serve(context.Background(), serverStream) + err := ss.Serve(t.Context(), serverStream) assert.NoError(t, err) serverStream.Close() close(sessionRegisteredChan) }() - rpcClientStream, err := NewCloudflaredClient(context.Background(), clientStream, 5*time.Second) + rpcClientStream, err := NewCloudflaredClient(t.Context(), clientStream, 5*time.Second) require.NoError(t, err) - reg, err := rpcClientStream.RegisterUdpSession(context.Background(), test.sessionRPCServer.sessionID, test.sessionRPCServer.dstIP, test.sessionRPCServer.dstPort, testCloseIdleAfterHint, test.sessionRPCServer.traceContext) + reg, err := rpcClientStream.RegisterUdpSession(t.Context(), test.sessionRPCServer.sessionID, test.sessionRPCServer.dstIP, test.sessionRPCServer.dstPort, testCloseIdleAfterHint, test.sessionRPCServer.traceContext) require.NoError(t, err) require.NoError(t, reg.Err) // Different sessionID, the RPC server should reject 
the registration - reg, err = rpcClientStream.RegisterUdpSession(context.Background(), uuid.New(), test.sessionRPCServer.dstIP, test.sessionRPCServer.dstPort, testCloseIdleAfterHint, test.sessionRPCServer.traceContext) + reg, err = rpcClientStream.RegisterUdpSession(t.Context(), uuid.New(), test.sessionRPCServer.dstIP, test.sessionRPCServer.dstPort, testCloseIdleAfterHint, test.sessionRPCServer.traceContext) require.NoError(t, err) require.Error(t, reg.Err) - require.NoError(t, rpcClientStream.UnregisterUdpSession(context.Background(), test.sessionRPCServer.sessionID, unregisterMessage)) + require.NoError(t, rpcClientStream.UnregisterUdpSession(t.Context(), test.sessionRPCServer.sessionID, unregisterMessage)) // Different sessionID, the RPC server should reject the unregistration - require.Error(t, rpcClientStream.UnregisterUdpSession(context.Background(), uuid.New(), unregisterMessage)) + require.Error(t, rpcClientStream.UnregisterUdpSession(t.Context(), uuid.New(), unregisterMessage)) rpcClientStream.Close() <-sessionRegisteredChan @@ -185,14 +185,14 @@ func TestManageConfiguration(t *testing.T) { updatedChan := make(chan struct{}) go func() { server := NewCloudflaredServer(nil, nil, configRPCServer, 10*time.Second) - err := server.Serve(context.Background(), serverStream) + err := server.Serve(t.Context(), serverStream) assert.NoError(t, err) serverStream.Close() close(updatedChan) }() - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(t.Context(), time.Second) defer cancel() rpcClientStream, err := NewCloudflaredClient(ctx, clientStream, 5*time.Second) require.NoError(t, err) diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md index 28bdd2fc..66a8a0f8 100644 --- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md @@ -1,3 +1,32 @@ +## Changed + + - Defined a custom error, ErrUnexpectedSignatureAlgorithm, returned when a JWS + header contains an unsupported signature algorithm. + +# v4.0.4 + +## Fixed + + - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a + breaking change. See #136 / #137. + +# v4.0.3 + +## Changed + + - Allow unmarshalling JSONWebKeySets with unsupported key types (#130) + - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129) + - Dependency updates + +# v4.0.2 + +## Changed + + - Improved documentation of Verify() to note that JSONWebKeySet is a supported + argument type (#104) + - Defined exported error values for missing x5c header and unsupported elliptic + curves error cases (#117) + # v4.0.1 ## Fixed diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md index b63e1f8f..4b4805ad 100644 --- a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md +++ b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md @@ -7,9 +7,3 @@ When submitting code, please make every effort to follow existing conventions and style in order to keep the code as readable as possible. Please also make sure all tests pass by running `go test`, and format your code with `go fmt`. We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -Individual Contributor License Agreement. We use [cla-assistant.io][1] and you -will be prompted to sign once a pull request is opened. 
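The pogs Unmarshal changes above replace fmt.Errorf(respErr) with fmt.Errorf("%s", respErr). When a string received from the remote side is used as the format argument, any stray %-verbs in it are interpreted as formatting directives (and newer go vet versions flag the non-constant format string); passing it through "%s" keeps the text literal. A small sketch with a made-up error string:

package main

import (
	"errors"
	"fmt"
)

func main() {
	respErr := "rate limit exceeded: 100%!"

	bad := fmt.Errorf(respErr)        // respErr is treated as a format string; "%!" is mangled
	good := fmt.Errorf("%s", respErr) // text preserved verbatim
	alt := errors.New(respErr)        // equivalent when no wrapping is needed

	fmt.Println(bad)
	fmt.Println(good)
	fmt.Println(alt)
}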
- -[1]: https://cla-assistant.io/ diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md index 79a7c5ec..02b57495 100644 --- a/vendor/github.com/go-jose/go-jose/v4/README.md +++ b/vendor/github.com/go-jose/go-jose/v4/README.md @@ -9,14 +9,6 @@ Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, JSON Web Signature, and JSON Web Token standards. -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - ## Overview The implementation follows the @@ -109,6 +101,6 @@ allows attaching a key id. Examples can be found in the Godoc reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util) +[`jose-util`](https://github.com/go-jose/go-jose/tree/main/jose-util) subdirectory also contains a small command-line utility which might be useful as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go index aba08424..d81b03b4 100644 --- a/vendor/github.com/go-jose/go-jose/v4/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - key := tryJWKS(decryptionKey, obj.Header) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return nil, err + } decrypter, err := newDecrypter(key) if err != nil { return nil, err @@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - key := tryJWKS(decryptionKey, obj.Header) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return -1, Header{}, nil, err + } decrypter, err := newDecrypter(key) if err != nil { return -1, Header{}, nil, err diff --git a/vendor/github.com/go-jose/go-jose/v4/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go index 89f03ee3..9f1322dc 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go @@ -288,10 +288,11 @@ func ParseEncryptedCompact( keyAlgorithms []KeyAlgorithm, contentEncryption []ContentEncryption, ) (*JSONWebEncryption, error) { - parts := strings.Split(input, ".") - if len(parts) != 5 { + // Five parts is four separators + if strings.Count(input, ".") != 4 { return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") } + parts := strings.SplitN(input, ".", 5) rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) if err != nil { diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go index a565aaab..9e57e93b 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -239,10 +239,10 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { keyPub = key } } else { - err = fmt.Errorf("go-jose/go-jose: unknown 
curve %s'", raw.Crv) + return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv) } default: - err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) + return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) } if err != nil { @@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) { return key.K.bytes(), nil } -func tryJWKS(key interface{}, headers ...Header) interface{} { +var ( + // ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a + // key ID which matches one in the provided tokens headers. + ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set") +) + +func tryJWKS(key interface{}, headers ...Header) (interface{}, error) { var jwks JSONWebKeySet switch jwksType := key.(type) { @@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { case JSONWebKeySet: jwks = jwksType default: - return key + // If the specified key is not a JWKS, return as is. + return key, nil } + // Determine the KID to search for from the headers. var kid string for _, header := range headers { if header.KeyID != "" { @@ -799,14 +807,17 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { } } + // If no KID is specified in the headers, reject. if kid == "" { - return key + return nil, ErrJWKSKidNotFound } + // Find the JWK with the matching KID. If no JWK with the specified KID is + // found, reject. keys := jwks.Key(kid) if len(keys) == 0 { - return key + return nil, ErrJWKSKidNotFound } - return keys[0].Key + return keys[0].Key, nil } diff --git a/vendor/github.com/go-jose/go-jose/v4/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go index 3a912301..be2b7330 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jws.go +++ b/vendor/github.com/go-jose/go-jose/v4/jws.go @@ -75,7 +75,14 @@ type Signature struct { original *rawSignatureInfo } -// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. +// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. Validation fails if +// the JWS is signed with an algorithm that isn't in the provided list of signature algorithms. +// Applications should decide for themselves which signature algorithms are acceptable. If you're +// not sure which signature algorithms your application might receive, consult the documentation of +// the program which provides them or the protocol that you are implementing. You can also try +// getting an example JWS and decoding it with a tool like https://jwt.io to see what its "alg" +// header parameter indicates. The signature on the JWS does not get validated during parsing. Call +// Verify() after parsing to validate the signature and obtain the payload. // // https://datatracker.ietf.org/doc/html/rfc7515#section-7 func ParseSigned( @@ -90,7 +97,14 @@ func ParseSigned( return parseSignedCompact(signature, nil, signatureAlgorithms) } -// ParseSignedCompact parses a message in JWS Compact Serialization. +// ParseSignedCompact parses a message in JWS Compact Serialization. Validation fails if the JWS is +// signed with an algorithm that isn't in the provided list of signature algorithms. Applications +// should decide for themselves which signature algorithms are acceptable.If you're not sure which +// signature algorithms your application might receive, consult the documentation of the program +// which provides them or the protocol that you are implementing. 
You can also try getting an +// example JWS and decoding it with a tool like https://jwt.io to see what its "alg" header +// parameter indicates. The signature on the JWS does not get validated during parsing. Call +// Verify() after parsing to validate the signature and obtain the payload. // // https://datatracker.ietf.org/doc/html/rfc7515#section-7.1 func ParseSignedCompact( @@ -101,6 +115,15 @@ func ParseSignedCompact( } // ParseDetached parses a signed message in compact serialization format with detached payload. +// Validation fails if the JWS is signed with an algorithm that isn't in the provided list of +// signature algorithms. Applications should decide for themselves which signature algorithms are +// acceptable. If you're not sure which signature algorithms your application might receive, consult +// the documentation of the program which provides them or the protocol that you are implementing. +// You can also try getting an example JWS and decoding it with a tool like https://jwt.io to see +// what its "alg" header parameter indicates. The signature on the JWS does not get validated during +// parsing. Call Verify() after parsing to validate the signature and obtain the payload. +// +// https://datatracker.ietf.org/doc/html/rfc7515#appendix-F func ParseDetached( signature string, payload []byte, @@ -181,6 +204,25 @@ func containsSignatureAlgorithm(haystack []SignatureAlgorithm, needle SignatureA return false } +// ErrUnexpectedSignatureAlgorithm is returned when the signature algorithm in +// the JWS header does not match one of the expected algorithms. +type ErrUnexpectedSignatureAlgorithm struct { + // Got is the signature algorithm found in the JWS header. + Got SignatureAlgorithm + expected []SignatureAlgorithm +} + +func (e *ErrUnexpectedSignatureAlgorithm) Error() string { + return fmt.Sprintf("unexpected signature algorithm %q; expected %q", e.Got, e.expected) +} + +func newErrUnexpectedSignatureAlgorithm(got SignatureAlgorithm, expected []SignatureAlgorithm) error { + return &ErrUnexpectedSignatureAlgorithm{ + Got: got, + expected: expected, + } +} + // sanitized produces a cleaned-up JWS object from the raw JSON. 
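The go-jose/v4 updates above expand the ParseSigned family's documentation and introduce a typed ErrUnexpectedSignatureAlgorithm in place of a one-off fmt.Errorf, so callers can branch on the rejected algorithm. A hedged sketch of how a consumer might use it (the allowed-algorithm list is a placeholder, not what cloudflared configures):

package jwscheck

import (
	"errors"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

// parseToken accepts only the algorithms this application expects and reports
// which algorithm an offending token actually carried.
func parseToken(token string) (*jose.JSONWebSignature, error) {
	sig, err := jose.ParseSigned(token, []jose.SignatureAlgorithm{jose.RS256, jose.ES256})
	if err != nil {
		var algErr *jose.ErrUnexpectedSignatureAlgorithm
		if errors.As(err, &algErr) {
			// algErr.Got carries the "alg" value found in the JWS header.
			return nil, fmt.Errorf("token signed with unsupported algorithm %q", algErr.Got)
		}
		return nil, err
	}
	return sig, nil
}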
func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) { if len(signatureAlgorithms) == 0 { @@ -236,8 +278,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo alg := SignatureAlgorithm(signature.Header.Algorithm) if !containsSignatureAlgorithm(signatureAlgorithms, alg) { - return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", - alg, signatureAlgorithms) + return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms) } if signature.header != nil { @@ -285,8 +326,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm) if !containsSignatureAlgorithm(signatureAlgorithms, alg) { - return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", - alg, signatureAlgorithms) + return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms) } if obj.Signatures[i].header != nil { @@ -327,10 +367,11 @@ func parseSignedCompact( payload []byte, signatureAlgorithms []SignatureAlgorithm, ) (*JSONWebSignature, error) { - parts := strings.Split(input, ".") - if len(parts) != 3 { + // Three parts is two separators + if strings.Count(input, ".") != 2 { return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") } + parts := strings.SplitN(input, ".", 3) if parts[1] != "" && payload != nil { return nil, fmt.Errorf("go-jose/go-jose: payload is not detached") diff --git a/vendor/github.com/go-jose/go-jose/v4/opaque.go b/vendor/github.com/go-jose/go-jose/v4/opaque.go index 68db085e..42942723 100644 --- a/vendor/github.com/go-jose/go-jose/v4/opaque.go +++ b/vendor/github.com/go-jose/go-jose/v4/opaque.go @@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig } // OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key. +// +// Note: this cannot currently be implemented outside this package because of its +// unexported method. type OpaqueKeyEncrypter interface { // KeyID returns the kid KeyID() string diff --git a/vendor/github.com/go-jose/go-jose/v4/shared.go b/vendor/github.com/go-jose/go-jose/v4/shared.go index b485e43b..1ec33961 100644 --- a/vendor/github.com/go-jose/go-jose/v4/shared.go +++ b/vendor/github.com/go-jose/go-jose/v4/shared.go @@ -71,6 +71,12 @@ var ( // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a // nonce header parameter was included in an unprotected header object. ErrUnprotectedNonce = errors.New("go-jose/go-jose: Nonce parameter included in unprotected header") + + // ErrMissingX5cHeader indicates that the JWT header is missing x5c headers. + ErrMissingX5cHeader = errors.New("go-jose/go-jose: no x5c header present in message") + + // ErrUnsupportedEllipticCurve indicates unsupported or unknown elliptic curve has been found. + ErrUnsupportedEllipticCurve = errors.New("go-jose/go-jose: unsupported/unknown elliptic curve") ) // Key management algorithms @@ -199,7 +205,7 @@ type Header struct { // not be validated with the given verify options. 
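shared.go now also exposes ErrMissingX5cHeader and ErrUnsupportedEllipticCurve as sentinel values instead of ad-hoc error strings, so a missing certificate chain becomes a condition callers can test for with errors.Is. A brief sketch, assuming the caller already holds a parsed *jose.JSONWebSignature:

package jwscheck

import (
	"crypto/x509"
	"errors"

	jose "github.com/go-jose/go-jose/v4"
)

// certChains returns the verified x5c chains for the first signature, treating
// an absent x5c header as "no embedded chain" rather than a hard failure.
func certChains(sig *jose.JSONWebSignature, roots *x509.CertPool) ([][]*x509.Certificate, error) {
	chains, err := sig.Signatures[0].Header.Certificates(x509.VerifyOptions{Roots: roots})
	if errors.Is(err, jose.ErrMissingX5cHeader) {
		return nil, nil // caller should resolve the key some other way (kid lookup, JWKS, ...)
	}
	return chains, err
}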
func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) { if len(h.certificates) == 0 { - return nil, errors.New("go-jose/go-jose: no x5c header present in message") + return nil, ErrMissingX5cHeader } leaf := h.certificates[0] @@ -501,7 +507,7 @@ func curveName(crv elliptic.Curve) (string, error) { case elliptic.P521(): return "P-521", nil default: - return "", fmt.Errorf("go-jose/go-jose: unsupported/unknown elliptic curve") + return "", ErrUnsupportedEllipticCurve } } diff --git a/vendor/github.com/go-jose/go-jose/v4/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go index f0b0294f..3dec0112 100644 --- a/vendor/github.com/go-jose/go-jose/v4/signing.go +++ b/vendor/github.com/go-jose/go-jose/v4/signing.go @@ -358,6 +358,8 @@ func (ctx *genericSigner) Options() SignerOptions { // - *rsa.PublicKey // - *JSONWebKey // - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet // - []byte (an HMAC key) // - Any type that implements the OpaqueVerifier interface. // @@ -388,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return err + } verifier, err := newVerifier(key) if err != nil { return err @@ -453,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) 
+ if err != nil { + return -1, Signature{}, err + } verifier, err := newVerifier(key) if err != nil { return -1, Signature{}, err diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric.go b/vendor/github.com/go-jose/go-jose/v4/symmetric.go index a69103b0..09efefb2 100644 --- a/vendor/github.com/go-jose/go-jose/v4/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric.go @@ -21,6 +21,7 @@ import ( "crypto/aes" "crypto/cipher" "crypto/hmac" + "crypto/pbkdf2" "crypto/rand" "crypto/sha256" "crypto/sha512" @@ -30,8 +31,6 @@ import ( "hash" "io" - "golang.org/x/crypto/pbkdf2" - josecipher "github.com/go-jose/go-jose/v4/cipher" ) @@ -330,7 +329,10 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie // derive key keyLen, h := getPbkdf2Params(alg) - key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h) + key, err := pbkdf2.Key(h, string(ctx.key), salt, ctx.p2c, keyLen) + if err != nil { + return recipientInfo{}, nil + } // use AES cipher with derived key block, err := aes.NewCipher(key) @@ -432,7 +434,10 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien // derive key keyLen, h := getPbkdf2Params(alg) - key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h) + key, err := pbkdf2.Key(h, string(ctx.key), salt, p2c, keyLen) + if err != nil { + return nil, err + } // use AES cipher with derived key block, err := aes.NewCipher(key) diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a..7c7f0c69 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f..30568e76 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. 
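The symmetric.go hunk above swaps golang.org/x/crypto/pbkdf2 for the crypto/pbkdf2 package that entered the standard library in Go 1.24; the standard-library Key takes the hash constructor first, takes the password as a string, and returns an error alongside the derived key. A minimal sketch of deriving a 32-byte key with it (the iteration count and salt size are illustrative):

package kdfexample

import (
	"crypto/pbkdf2"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// deriveKey derives a 32-byte key from a passphrase with the Go 1.24
// standard-library PBKDF2 implementation.
func deriveKey(password string) (key, salt []byte, err error) {
	salt = make([]byte, 16)
	if _, err = rand.Read(salt); err != nil {
		return nil, nil, err
	}
	key, err = pbkdf2.Key(sha256.New, password, salt, 600_000, 32)
	if err != nil {
		return nil, nil, fmt.Errorf("pbkdf2: %w", err)
	}
	return key, salt, nil
}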
type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, true) // escape user-provided keys + + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue + } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) + } + + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.parentValuesStr) - continuing = true - } - - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- - } - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - continuing = true - } - - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + buf.WriteString(bodyStr) } if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + buf.WriteByte('}') // for the whole record } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. 
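The funcr.Formatter rewrite above replaces the single parentValuesStr/groupDepth pair with a stack of groupDef entries, so a group is only rendered once the final record is known and an empty group can be elided entirely. Roughly, the behavior it supports looks like this (a sketch assuming logr v1.4-style slog interop; the group and key names are made up):

package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr renders each record as a single JSON object.
	lgr := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})

	// Bridge to slog; WithGroup maps onto funcr's group handling.
	sl := slog.New(logr.ToSlogHandler(lgr))

	// A non-empty group becomes a nested object in the record,
	// e.g. ...,"http":{"method":"GET"}.
	sl.WithGroup("http").Info("request", "method", "GET")

	// An empty group is dropped from the output instead of producing "empty":{}.
	sl.WithGroup("empty").Info("request")
}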
+func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true + } + + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') + } + + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. 
buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.editorconfig rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitattributes rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitignore rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md similarity index 95% rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md index 61d8ebff..2ce45dd4 100644 --- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + ## Release 3.2.0 (2020-12-14) ### Added diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt similarity index 100% rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md similarity index 88% rename from vendor/github.com/go-task/slim-sprig/README.md rename to vendor/github.com/go-task/slim-sprig/v3/README.md index 72579471..b5ab5642 100644 --- a/vendor/github.com/go-task/slim-sprig/README.md +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -1,4 +1,4 @@ -# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with all functions that depend on external (non standard library) or crypto packages diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml similarity index 89% rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml index cdcfd223..8e6346bb 100644 --- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -1,6 +1,6 @@ # https://taskfile.dev -version: '2' +version: '3' tasks: default: diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/crypto.go rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/date.go rename to vendor/github.com/go-task/slim-sprig/v3/date.go diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/defaults.go rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/dict.go rename to vendor/github.com/go-task/slim-sprig/v3/dict.go diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/doc.go rename to vendor/github.com/go-task/slim-sprig/v3/doc.go diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/functions.go rename to vendor/github.com/go-task/slim-sprig/v3/functions.go diff --git a/vendor/github.com/go-task/slim-sprig/list.go 
b/vendor/github.com/go-task/slim-sprig/v3/list.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/list.go rename to vendor/github.com/go-task/slim-sprig/v3/list.go diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/network.go rename to vendor/github.com/go-task/slim-sprig/v3/network.go diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/numeric.go rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/reflect.go rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/regex.go rename to vendor/github.com/go-task/slim-sprig/v3/regex.go diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/strings.go rename to vendor/github.com/go-task/slim-sprig/v3/strings.go diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/url.go rename to vendor/github.com/go-task/slim-sprig/v3/url.go diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 182c926b..8ce9d3cf 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -122,6 +122,7 @@ func (p *Profile) preEncode() { } p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + p.docURLX = addString(strings, p.DocURL) p.stringTable = make([]string, len(strings)) for s, i := range strings { @@ -156,6 +157,7 @@ func (p *Profile) encode(b *buffer) { encodeInt64Opt(b, 12, p.Period) encodeInt64s(b, 13, p.commentX) encodeInt64(b, 14, p.defaultSampleTypeX) + encodeInt64Opt(b, 15, p.docURLX) } var profileDecoder = []decoder{ @@ -237,6 +239,8 @@ var profileDecoder = []decoder{ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, // int64 defaultSampleType = 14 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, + // string doc_link = 15; + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, } // postDecode takes the unexported fields populated by decode (with @@ -384,6 +388,7 @@ func (p *Profile) postDecode() error { p.commentX = nil p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.DocURL, err = getString(p.stringTable, &p.docURLX, err) p.stringTable = nil return err } @@ -530,6 +535,7 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -538,6 +544,8 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return 
decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 91f45e53..4580bab1 100644 --- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 4b66282c..ba4d7464 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -326,12 +326,13 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*2) + lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -418,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, + Column: src.Column, } return ln } @@ -474,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { var timeNanos, durationNanos, period int64 var comments []string seenComments := map[string]bool{} + var docURL string var defaultSampleType string for _, s := range srcs { if timeNanos == 0 || s.TimeNanos < timeNanos { @@ -492,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { if defaultSampleType == "" { defaultSampleType = s.DefaultSampleType } + if docURL == "" { + docURL = s.DocURL + } } p := &Profile{ @@ -507,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { Comments: comments, DefaultSampleType: defaultSampleType, + DocURL: docURL, } copy(p.SampleType, srcs[0].SampleType) return p, nil diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 60ef7e92..f47a2439 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -39,6 +39,7 @@ type Profile struct { Location []*Location Function []*Function Comments []string + DocURL string DropFrames string KeepFrames string @@ -53,6 +54,7 @@ type Profile struct { encodeMu sync.Mutex commentX []int64 + docURLX int64 dropFramesX int64 keepFramesX int64 stringTable []string @@ -145,6 +147,7 @@ type Location struct { type Line struct { Function *Function Line int64 + Column int64 functionIDX uint64 } @@ -436,7 +439,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. 
It also updates the // samples to point to the merged locations. -func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -458,7 +461,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address } // Aggregate locations - if !inlineFrame || !address || !linenumber { + if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -466,6 +469,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address if !linenumber { for i := range l.Line { l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 } } if !address { @@ -548,6 +557,9 @@ func (p *Profile) String() string { for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } + if url := p.DocURL; url != "" { + ss = append(ss, fmt.Sprintf("Doc: %s", url)) + } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) } @@ -627,10 +639,11 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, + l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" @@ -836,10 +849,10 @@ func (p *Profile) HasFileLines() bool { // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are -// "[vdso]", [vsyscall]" and some others, see the code. +// "[vdso]", "[vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 743555dd..f61356db 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -24,15 +24,15 @@ const ( var SingletonFormatter = New(ColorModeTerminal) -func F(format string, args ...interface{}) string { +func F(format string, args ...any) string { return SingletonFormatter.F(format, args...) } -func Fi(indentation uint, format string, args ...interface{}) string { +func Fi(indentation uint, format string, args ...any) string { return SingletonFormatter.Fi(indentation, format, args...) } -func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { +func Fiw(indentation uint, maxWidth uint, format string, args ...any) string { return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) 
} @@ -82,6 +82,10 @@ func New(colorMode ColorMode) Formatter { return fmt.Sprintf("\x1b[38;5;%dm", colorCode) } + if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor { + colorMode = ColorModeNone + } + f := Formatter{ ColorMode: colorMode, colors: map[string]string{ @@ -111,15 +115,15 @@ func New(colorMode ColorMode) Formatter { return f } -func (f Formatter) F(format string, args ...interface{}) string { +func (f Formatter) F(format string, args ...any) string { return f.Fi(0, format, args...) } -func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { +func (f Formatter) Fi(indentation uint, format string, args ...any) string { return f.Fiw(indentation, 0, format, args...) } -func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { out := f.style(format) if len(args) > 0 { out = fmt.Sprintf(out, args...) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 5db5d1a7..2b36b2fe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -2,6 +2,8 @@ package build import ( "fmt" + "os" + "path" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" @@ -42,7 +44,7 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go internal.VerifyCLIAndFrameworkVersion(suites) opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) - opc.StartCompiling(suites, goFlagsConfig) + opc.StartCompiling(suites, goFlagsConfig, true) for { suiteIdx, suite := opc.Next() @@ -53,7 +55,22 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) } else { - fmt.Printf("Compiled %s.test\n", suite.PackageName) + var testBinPath string + if len(goFlagsConfig.O) != 0 { + stat, err := os.Stat(goFlagsConfig.O) + if err != nil { + panic(err) + } + if stat.IsDir() { + testBinPath = goFlagsConfig.O + "/" + suite.PackageName + ".test" + } else { + testBinPath = goFlagsConfig.O + } + } + if len(testBinPath) == 0 { + testBinPath = path.Join(suite.Path, suite.PackageName+".test") + } + fmt.Printf("Compiled %s\n", testBinPath) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go index 2efd2860..f0e7331f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -12,7 +12,7 @@ func Abort(details AbortDetails) { panic(details) } -func AbortGracefullyWith(format string, args ...interface{}) { +func AbortGracefullyWith(format string, args ...any) { Abort(AbortDetails{ ExitCode: 0, Error: fmt.Errorf(format, args...), @@ -20,7 +20,7 @@ func AbortGracefullyWith(format string, args ...interface{}) { }) } -func AbortWith(format string, args ...interface{}) { +func AbortWith(format string, args ...any) { Abort(AbortDetails{ ExitCode: 1, Error: fmt.Errorf(format, args...), @@ -28,7 +28,7 @@ func AbortWith(format string, args ...interface{}) { }) } -func AbortWithUsage(format string, args ...interface{}) { +func AbortWithUsage(format string, args ...any) { Abort(AbortDetails{ ExitCode: 1, Error: fmt.Errorf(format, args...), diff --git 
a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go index 12e0e565..79b83a3a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -24,7 +24,11 @@ func (c Command) Run(args []string, additionalArgs []string) { if err != nil { AbortWithUsage(err.Error()) } - + for _, arg := range args { + if len(arg) > 1 && strings.HasPrefix(arg, "-") { + AbortWith(types.GinkgoErrors.FlagAfterPositionalParameter().Error()) + } + } c.Command(args, additionalArgs) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go index 88dd8d6b..c3f6d3a1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -68,7 +68,6 @@ func (p Program) RunAndExit(osArgs []string) { fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) } p.Exiter(exitCode) - return }() args, additionalArgs := []string{}, []string{} @@ -157,7 +156,6 @@ func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { p.EmitUsage(writer) Abort(AbortDetails{ExitCode: 1}) } - return } func (p Program) EmitUsage(writer io.Writer) { diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index 73aff0b7..b2dc59be 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index be01dec9..cf3b7cb6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -174,6 +174,7 @@ func moduleName(modRoot string) string { if err != nil { return "" } + defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 86da7340..7bbe6be0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -11,7 +11,7 @@ import ( "github.com/onsi/ginkgo/v2/types" ) -func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) TestSuite { if suite.PathToCompiledTest != "" { return suite } @@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite return suite } + if len(goFlagsConfig.O) > 0 { + userDefinedPath, err := filepath.Abs(goFlagsConfig.O) + if err != nil { + suite.State = TestSuiteStateFailedToCompile 
+ suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error()) + return suite + } + path = userDefinedPath + } + + goFlagsConfig.O = path + ginkgoInvocationPath, _ := os.Getwd() ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) packagePath := suite.AbsPath() @@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath, preserveSymbols) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) @@ -108,7 +120,7 @@ func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { } } -func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) { opc.stopped = false opc.idx = 0 opc.numSuites = len(suites) @@ -123,7 +135,7 @@ func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsCon stopped := opc.stopped opc.mutex.Unlock() if !stopped { - suite = CompileSuite(suite, goFlagsConfig) + suite = CompileSuite(suite, goFlagsConfig, preserveSymbols) } c <- suite } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 00000000..87cfa111 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if !sortFunc(i) { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index bd3c6d02..8e16d2bb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -1,7 +1,6 @@ 
package internal import ( - "bytes" "fmt" "os" "os/exec" @@ -12,6 +11,7 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,38 +144,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -//loads each profile, combines them, deletes them, stores them in destination +// loads each profile, merges them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - combined := &bytes.Buffer{} - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for i, profile := range profiles { - contents, err := os.ReadFile(profile) + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) if err != nil { - return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + return err } - os.Remove(profile) - - // remove the cover mode line from every file - // except the first one - if i > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. - if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) } } - - err := os.WriteFile(destination, combined.Bytes(), 0666) + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { - return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + return err + } + defer dst.Close() + err = DumpCoverProfiles(merged, dst) + if err != nil { + return err } return nil } @@ -184,7 +173,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) { cmd := exec.Command("go", "tool", "cover", "-func", profile) output, err := cmd.CombinedOutput() if err != nil { - return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) } re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) matches := re.FindStringSubmatch(string(output)) @@ -208,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) } prof, err := profile.Parse(proFile) + _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go index 64dcb1b7..df99875b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -7,6 +7,7 @@ import ( "path" "path/filepath" "regexp" + "runtime" "strings" "github.com/onsi/ginkgo/v2/types" @@ -192,7 +193,7 @@ func precompiledTestSuite(path string) (TestSuite, error) { return TestSuite{}, errors.New("this is not a .test binary") } - if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { + if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && 
info.Mode()&0111 == 0 { return TestSuite{}, errors.New("this is not executable") } @@ -225,7 +226,7 @@ func suitesInDir(dir string, recurse bool) TestSuites { files, _ := os.ReadDir(dir) re := regexp.MustCompile(`^[^._].*_test\.go$`) for _, file := range files { - if !file.IsDir() && re.Match([]byte(file.Name())) { + if !file.IsDir() && re.MatchString(file.Name()) { suite := TestSuite{ Path: relPath(dir), PackageName: packageNameForSuite(dir), @@ -240,7 +241,7 @@ func suitesInDir(dir string, recurse bool) TestSuites { if recurse { re = regexp.MustCompile(`^[._]`) for _, file := range files { - if file.IsDir() && !re.Match([]byte(file.Name())) { + if file.IsDir() && !re.MatchString(file.Name()) { suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) } } @@ -271,7 +272,7 @@ func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) for _, file := range files { - if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { + if !file.IsDir() && reTestFile.MatchString(file.Name()) { contents, _ := os.ReadFile(dir + "/" + file.Name()) if reGinkgo.Match(contents) { return true diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go index e9abb27d..bd6b8fbf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" "os" - + _ "go.uber.org/automaxprocs" "github.com/onsi/ginkgo/v2/ginkgo/build" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/generators" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 958daccb..5d8d00bb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -1,10 +1,11 @@ package outline import ( - "github.com/onsi/ginkgo/v2/types" "go/ast" "go/token" "strconv" + + "github.com/onsi/ginkgo/v2/types" ) const ( diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go index 67ec5ab7..f0a6b5d2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string { } name := spec.Name.String() if name == "" { - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } + name = "ginkgo" } if name == "." { name = "" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go index c2327cda..e99d557d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -1,10 +1,13 @@ package outline import ( + "bytes" + "encoding/csv" "encoding/json" "fmt" "go/ast" "go/token" + "strconv" "strings" "golang.org/x/tools/go/ast/inspector" @@ -84,9 +87,11 @@ func (o *outline) String() string { // StringIndent returns a CSV-formated outline, but every line is indented by // one 'width' of spaces for every level of nesting. 
func (o *outline) StringIndent(width int) string { - var b strings.Builder + var b bytes.Buffer b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") + csvWriter := csv.NewWriter(&b) + currentIndent := 0 pre := func(n *ginkgoNode) { b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) @@ -96,8 +101,22 @@ func (o *outline) StringIndent(width int) string { } else { labels = strings.Join(n.Labels, ", ") } - //enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings - b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) + + row := []string{ + n.Name, + n.Text, + strconv.Itoa(n.Start), + strconv.Itoa(n.End), + strconv.FormatBool(n.Spec), + strconv.FormatBool(n.Focused), + strconv.FormatBool(n.Pending), + labels, + } + csvWriter.Write(row) + + // Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation + csvWriter.Flush() + currentIndent += width } post := func(n *ginkgoNode) { @@ -106,5 +125,6 @@ func (o *outline) StringIndent(width int) string { for _, n := range o.Nodes { n.Walk(pre, post) } + return b.String() } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go index aaed4d57..03875b97 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -107,7 +107,7 @@ OUTER_LOOP: } opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) - opc.StartCompiling(suites, r.goFlagsConfig) + opc.StartCompiling(suites, r.goFlagsConfig, false) SUITE_LOOP: for { @@ -142,7 +142,7 @@ OUTER_LOOP: } if !endTime.IsZero() { - r.suiteConfig.Timeout = endTime.Sub(time.Now()) + r.suiteConfig.Timeout = time.Until(endTime) if r.suiteConfig.Timeout <= 0 { suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout opc.StopAndDrain() diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go index f5ddff30..a34d9435 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -78,7 +78,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) { if err != nil { continue } - if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { + if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { d.addDepIfNotPresent(pkg.Dir, depth) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index e9f7ec0c..0e6ae1f2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,7 +80,11 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } - if goTestRegExp.Match([]byte(info.Name())) { + if isHiddenFile(info) { + continue + } + + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { testModifiedTime = info.ModTime() @@ -87,7 +92,7 @@ func (p *PackageHash) computeHashes() (codeHash string, 
codeModifiedTime time.Ti continue } - if p.watchRegExp.Match([]byte(info.Name())) { + if p.watchRegExp.MatchString(info.Name()) { codeHash += p.hashForFileInfo(info) if info.ModTime().After(codeModifiedTime) { codeModifiedTime = info.ModTime() @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go index bde4193c..fe1ca305 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -153,7 +153,7 @@ func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { } func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { - suite = internal.CompileSuite(suite, w.goFlagsConfig) + suite = internal.CompileSuite(suite, w.goFlagsConfig, false) if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) return suite diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index 8ed86111..79bfa87d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -40,7 +40,7 @@ func (ic InterruptCause) String() string { } type InterruptStatus struct { - Channel chan interface{} + Channel chan any Level InterruptLevel Cause InterruptCause } @@ -62,14 +62,14 @@ type InterruptHandlerInterface interface { } type InterruptHandler struct { - c chan interface{} + c chan any lock *sync.Mutex level InterruptLevel cause InterruptCause client parallel_support.Client - stop chan interface{} + stop chan any signals []os.Signal - requestAbortCheck chan interface{} + requestAbortCheck chan any } func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { @@ -77,10 +77,10 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) * signals = []os.Signal{os.Interrupt, syscall.SIGTERM} } handler := &InterruptHandler{ - c: make(chan interface{}), + c: make(chan any), lock: &sync.Mutex{}, - stop: make(chan interface{}), - requestAbortCheck: make(chan interface{}), + stop: make(chan any), + requestAbortCheck: make(chan any), client: client, signals: signals, } @@ -98,9 +98,9 @@ func (handler *InterruptHandler) registerForInterrupts() { signal.Notify(signalChannel, handler.signals...) 
// cross-process abort handling - var abortChannel chan interface{} + var abortChannel chan any if handler.client != nil { - abortChannel = make(chan interface{}) + abortChannel = make(chan any) go func() { pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) for { @@ -125,7 +125,7 @@ func (handler *InterruptHandler) registerForInterrupts() { }() } - go func(abortChannel chan interface{}) { + go func(abortChannel chan any) { var interruptCause InterruptCause for { select { @@ -151,7 +151,7 @@ func (handler *InterruptHandler) registerForInterrupts() { } if handler.level != oldLevel { close(handler.c) - handler.c = make(chan interface{}) + handler.c = make(chan any) } handler.lock.Unlock() } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go index b3cd6429..4234d802 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -30,7 +30,7 @@ type Server interface { Close() Address() string RegisterAlive(node int, alive func() bool) - GetSuiteDone() chan interface{} + GetSuiteDone() chan any GetOutputDestination() io.Writer SetOutputDestination(io.Writer) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go index 6547c7a6..4aa10ae4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -34,7 +34,7 @@ func (client *httpClient) Close() error { return nil } -func (client *httpClient) post(path string, data interface{}) error { +func (client *httpClient) post(path string, data any) error { var body io.Reader if data != nil { encoded, err := json.Marshal(data) @@ -54,7 +54,7 @@ func (client *httpClient) post(path string, data interface{}) error { return nil } -func (client *httpClient) poll(path string, data interface{}) error { +func (client *httpClient) poll(path string, data any) error { for { resp, err := http.Get(client.serverHost + path) if err != nil { @@ -153,10 +153,7 @@ func (client *httpClient) PostAbort() error { func (client *httpClient) ShouldAbort() bool { err := client.poll("/abort", nil) - if err == ErrorGone { - return true - } - return false + return err == ErrorGone } func (client *httpClient) Write(p []byte) (int, error) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go index d2c71ab1..8a1b7a5b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -75,7 +75,7 @@ func (server *httpServer) Address() string { return "http://" + server.listener.Addr().String() } -func (server *httpServer) GetSuiteDone() chan interface{} { +func (server *httpServer) GetSuiteDone() chan any { return server.handler.done } @@ -96,7 +96,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) { // // The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` -func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object any) bool { 
defer request.Body.Close() if json.NewDecoder(request.Body).Decode(object) != nil { writer.WriteHeader(http.StatusBadRequest) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go index 59e8e6fd..bb4675a0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -35,7 +35,7 @@ func (client *rpcClient) Close() error { return client.client.Close() } -func (client *rpcClient) poll(method string, data interface{}) error { +func (client *rpcClient) poll(method string, data any) error { for { err := client.client.Call(method, voidSender, data) if err == nil { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go index 2620fd56..1574f99a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -25,7 +25,7 @@ type RPCServer struct { handler *ServerHandler } -//Create a new server, automatically selecting a port +// Create a new server, automatically selecting a port func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -37,7 +37,7 @@ func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, e }, nil } -//Start the server. You don't need to `go s.Start()`, just `s.Start()` +// Start the server. You don't need to `go s.Start()`, just `s.Start()` func (server *RPCServer) Start() { rpcServer := rpc.NewServer() rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server @@ -48,17 +48,17 @@ func (server *RPCServer) Start() { go httpServer.Serve(server.listener) } -//Stop the server +// Stop the server func (server *RPCServer) Close() { server.listener.Close() } -//The address the server can be reached it. Pass this into the `ForwardingReporter`. +// The address the server can be reached it. Pass this into the `ForwardingReporter`. 
func (server *RPCServer) Address() string { return server.listener.Addr().String() } -func (server *RPCServer) GetSuiteDone() chan interface{} { +func (server *RPCServer) GetSuiteDone() chan any { return server.handler.done } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go index a6d98793..ab9e1137 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -18,7 +18,7 @@ var voidSender Void // It handles all the business logic to avoid duplication between the two servers type ServerHandler struct { - done chan interface{} + done chan any outputDestination io.Writer reporter reporters.Reporter alives []func() bool @@ -46,7 +46,7 @@ func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHan parallelTotal: parallelTotal, outputDestination: os.Stdout, - done: make(chan interface{}), + done: make(chan any), } } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be75..74ad0768 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,10 +182,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel + //should we completely omit this spec? 
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -262,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, jsut emit the header and return + // If we have no content to show, just emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } return } @@ -283,26 +307,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +426,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) @@ -656,11 +685,11 @@ func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { } /* Rendering text */ -func (r *DefaultReporter) f(format string, args ...interface{}) string { +func (r *DefaultReporter) f(format string, args ...any) string { return r.formatter.F(format, args...) 
} -func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { +func (r *DefaultReporter) fi(indentation uint, format string, args ...any) string { return r.formatter.Fi(indentation, format, args...) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go index be506f9b..5d3e8db9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -18,6 +18,7 @@ func GenerateJSONReport(report types.Report, destination string) error { if err != nil { return err } + defer f.Close() enc := json.NewEncoder(f) enc.SetIndent("", " ") err = enc.Encode([]types.Report{ @@ -26,7 +27,7 @@ func GenerateJSONReport(report types.Report, destination string) error { if err != nil { return err } - return f.Close() + return nil } // MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources @@ -57,11 +58,12 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, if err != nil { return messages, err } + defer f.Close() enc := json.NewEncoder(f) enc.SetIndent("", " ") err = enc.Encode(allReports) if err != nil { return messages, err } - return messages, f.Close() + return messages, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 81604220..562e0f62 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -15,6 +15,7 @@ import ( "fmt" "os" "path" + "regexp" "strings" "github.com/onsi/ginkgo/v2/config" @@ -104,6 +105,8 @@ type JUnitProperty struct { Value string `xml:"value,attr"` } +var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) + type JUnitTestCase struct { // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" Name string `xml:"name,attr"` @@ -113,6 +116,8 @@ type JUnitTestCase struct { Status string `xml:"status,attr"` // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime Time float64 `xml:"time,attr"` + // Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. 
+ Owner string `xml:"owner,attr,omitempty"` //Skipped is populated with a message if the test was skipped or pending Skipped *JUnitSkipped `xml:"skipped,omitempty"` //Error is populated if the test panicked or was interrupted @@ -172,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, @@ -195,6 +201,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + owner := "" + for _, label := range labels { + if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { + owner = matches[1] + } + } name = strings.TrimSpace(name) test := JUnitTestCase{ @@ -202,6 +214,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), + Owner: owner, } if !spec.State.Is(config.OmitTimelinesForSpecState) { test.SystemErr = systemErrForUnstructuredReporters(spec) @@ -312,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go index 9cd57681..57e87517 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -149,7 +149,7 @@ func PruneStack(fullStackTrace string, skip int) string { re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) for i := 0; i < len(stack)/2; i++ { // We filter out based on the source code file name. 
- if !re.Match([]byte(stack[i*2+1])) { + if !re.MatchString(stack[i*2+1]) { prunedStack = append(prunedStack, stack[i*2]) prunedStack = append(prunedStack, stack[i*2+1]) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a..2e827efe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,6 +25,7 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool + FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int @@ -89,6 +90,9 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool + SilenceSkips bool + ForceNewlines bool JSONReport string JUnitReport string @@ -155,7 +159,7 @@ func (g CLIConfig) ComputedProcs() int { n := 1 if g.Parallel { - n = runtime.NumCPU() + n = runtime.GOMAXPROCS(-1) if n > 4 { n = n - 1 } @@ -168,7 +172,7 @@ func (g CLIConfig) ComputedNumCompilers() int { return g.NumCompilers } - return runtime.NumCPU() + return runtime.GOMAXPROCS(-1) } // Configuration for the Ginkgo CLI capturing available go flags @@ -198,6 +202,7 @@ type GoFlagsConfig struct { A bool ASMFlags string BuildMode string + BuildVCS bool Compiler string GCCGoFlags string GCFlags string @@ -215,6 +220,7 @@ type GoFlagsConfig struct { ToolExec string Work bool X bool + O string } func NewDefaultGoFlagsConfig() GoFlagsConfig { @@ -225,6 +231,10 @@ func (g GoFlagsConfig) BinaryMustBePreserved() bool { return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" } +func (g GoFlagsConfig) NeedsSymbols() bool { + return g.BinaryMustBePreserved() +} + // Configuration that were deprecated in 2.0 type deprecatedConfig struct { DebugParallel bool @@ -251,8 +261,12 @@ var FlagSections = GinkgoFlagSections{ {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"}, {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, - {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, - {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis", + Description: "When generating a cover files, please pass a filename {{bold}}not{{/}} a path. To specify a different directory use {{magenta}}--output-dir{{/}}.", + }, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis", + Description: "When generating profile files, please pass filenames {{bold}}not{{/}} a path. Ginkgo will generate a profile file with the given name in the package's directory. To specify a different directory use {{magenta}}--output-dir{{/}}.", + }, {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. 
To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, @@ -264,7 +278,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -274,6 +288,8 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, @@ -320,7 +336,7 @@ var ParallelConfigFlags = GinkgoFlags{ // ReporterConfigFlags provides flags for the Ginkgo test process, and CLI var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", - Usage: "If set, suppress color output in default reporter."}, + Usage: "If set, suppress color output in default reporter. 
You can also set the environment variable GINKGO_NO_COLOR=TRUE"}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", @@ -331,6 +347,12 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -351,7 +373,7 @@ var ReporterConfigFlags = GinkgoFlags{ func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) flags = flags.WithPrefix("ginkgo") - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "D": &deprecatedConfig{}, @@ -499,9 +521,9 @@ var GinkgoCLIWatchFlags = GinkgoFlags{ // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", - Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", - Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty (by explicitly passing --vet=""), "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", @@ -515,6 +537,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "arguments to pass on each go tool asm invocation."}, {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", + Usage: "adds version control information."}, {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", @@ -549,12 +573,14 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, + {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", + Usage: "output binary path (including name)."}, } // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI var GoRunFlags = GinkgoFlags{ {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", - Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover. Must be passed a filename, not a path. Use output-dir to control the location of the output.`}, {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. 
Preserves test binary.`}, {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", @@ -582,6 +608,22 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) } + if strings.ContainsRune(goFlagsConfig.CoverProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--coverprofile", goFlagsConfig.CoverProfile)) + } + if strings.ContainsRune(goFlagsConfig.CPUProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--cpuprofile", goFlagsConfig.CPUProfile)) + } + if strings.ContainsRune(goFlagsConfig.MemProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--memprofile", goFlagsConfig.MemProfile)) + } + if strings.ContainsRune(goFlagsConfig.BlockProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--blockprofile", goFlagsConfig.BlockProfile)) + } + if strings.ContainsRune(goFlagsConfig.MutexProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--mutexprofile", goFlagsConfig.MutexProfile)) + } + //initialize the output directory if cliConfig.OutputDir != "" { err := os.MkdirAll(cliConfig.OutputDir, 0777) @@ -602,7 +644,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string, preserveSymbols bool) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { @@ -625,10 +667,14 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") } - args := []string{"test", "-c", "-o", destination, packageToBuild} + if !goFlagsConfig.NeedsSymbols() && goFlagsConfig.LDFlags == "" && !preserveSymbols { + goFlagsConfig.LDFlags = "-w -s" + } + + args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, - map[string]interface{}{ + map[string]any{ "Go": &goFlagsConfig, }, ) @@ -647,7 +693,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) 
- bindings := map[string]interface{}{ + bindings := map[string]any{ "S": &suiteConfig, "R": &reporterConfig, "Go": &goFlagsConfig, @@ -659,7 +705,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC // GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { flags := GoRunFlags.WithPrefix("test") - bindings := map[string]interface{}{ + bindings := map[string]any{ "Go": &goFlagsConfig, } @@ -681,7 +727,7 @@ func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterCo flags = flags.CopyAppend(GoBuildFlags...) flags = flags.CopyAppend(GoRunFlags...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "C": cliConfig, @@ -702,7 +748,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter flags = flags.CopyAppend(GoBuildFlags...) flags = flags.CopyAppend(GoRunFlags...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "C": cliConfig, @@ -718,7 +764,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "C": cliConfig, "Go": goFlagsConfig, "D": &deprecatedConfig{}, @@ -742,7 +788,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") - bindings := map[string]interface{}{ + bindings := map[string]any{ "C": cliConfig, } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go index 17922304..518989a8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -113,7 +113,7 @@ type DeprecatedSpecFailure struct { type DeprecatedSpecMeasurement struct { Name string - Info interface{} + Info any Order int Results []float64 diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 4fbdc3e9..c2796b54 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -88,7 +88,7 @@ body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, n } } -func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic any, cl CodeLocation) error { return GinkgoError{ Heading: "Assertion or Panic detected during tree construction", Message: formatter.F( @@ -189,7 +189,7 @@ func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl } } -func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator any) error { return GinkgoError{ Heading: "Unknown Decorator", Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), @@ -345,7 +345,7 @@ func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { } /* ReportEntry errors */ -func (g ginkgoErrors) 
TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg any) error { return GinkgoError{ Heading: "Too Many ReportEntry Values", Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg), @@ -505,6 +505,15 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac } } +func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error { + return GinkgoError{ + Heading: "Contexts cannot be used in subtree tables", + Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + /* Parallel Synchronization errors */ func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { @@ -530,7 +539,7 @@ func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { /* Configuration errors */ -func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value any) error { return GinkgoError{ Heading: "Unknown Type passed to RunSpecs", Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), @@ -620,6 +629,20 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error { } } +func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error { + return GinkgoError{ + Heading: fmt.Sprintf("%s expects a filename but was given a path: %s", flag, path), + Message: fmt.Sprintf("%s takes a filename, not a path. Use --output-dir to specify a directory to collect all test outputs.", flag), + } +} + +func (g ginkgoErrors) FlagAfterPositionalParameter() error { + return GinkgoError{ + Heading: "Malformed arguments - detected a flag after the package liste", + Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet=\"\" ./...' is valid but 'ginkgo -p ./... 
-vet=\"\"' is not{{/}}", + } +} + /* Stack-Trace parsing errors */ func (g ginkgoErrors) FailedToParseStackTrace(message string) error { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae87..8409653f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -91,7 +92,7 @@ func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { type GinkgoFlagSet struct { flags GinkgoFlags - bindings interface{} + bindings any sections GinkgoFlagSections extraGoFlagsSection GinkgoFlagSection @@ -100,7 +101,7 @@ type GinkgoFlagSet struct { } // Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet -func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { +func NewGinkgoFlagSet(flags GinkgoFlags, bindings any, sections GinkgoFlagSections) (GinkgoFlagSet, error) { return bindFlagSet(GinkgoFlagSet{ flags: flags, bindings: bindings, @@ -109,7 +110,7 @@ func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFl } // Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet -func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings any, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { return bindFlagSet(GinkgoFlagSet{ flags: flags, bindings: bindings, @@ -334,7 +335,7 @@ func (f GinkgoFlagSet) substituteUsage() { fmt.Fprintln(f.flagSet.Output(), f.Usage()) } -func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) { +func valueAtKeyPath(root any, keyPath string) (reflect.Value, bool) { if len(keyPath) == 0 { return reflect.Value{}, false } @@ -431,8 +432,8 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. -func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
+func GenerateFlagArgs(flags GinkgoFlags, bindings any) ([]string, error) { result := []string{} for _, flag := range flags { name := flag.ExportAs @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index b0d3b651..40a909b6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 + } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } 
+} + type lfToken uint const ( @@ -58,6 +135,9 @@ const ( lfTokenOr lfTokenRegexp lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument lfTokenEOF ) @@ -71,6 +151,8 @@ func (l lfToken) Precedence() int { return 2 case lfTokenNot: return 3 + case lfTokenSetOperation: + return 4 } return -1 } @@ -93,6 +175,12 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" case lfTokenEOF: return "EOF" } @@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) + } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } } if tn.rightNode == nil { @@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string { return out } +var validSetOperations = map[string]string{ + "containsany": "containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -216,7 +343,7 @@ func tokenize(input string) func() (*treeNode, error) { consumeUntil := func(cutset string) (string, int) { j := i for ; j < len(runes); j++ { - if strings.IndexRune(cutset, runes[j]) >= 0 { + if strings.ContainsRune(cutset, runes[j]) { break } } @@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument 
next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") + } + i += n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we sent an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + switch runes[i] { case '&': if !peekIs('&') { @@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/") + value, n := consumeUntil("&|!,()/:") i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? + if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) + } + } + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -307,7 +511,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp: + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") } @@ -326,6 +530,18 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go index 7b1524b5..63f7a9f6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -9,18 +9,18 @@ import ( // ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports // and across the network connection when running in parallel type ReportEntryValue struct { - raw interface{} //unexported to prevent gob from freaking out about unregistered structs + raw any //unexported to prevent gob from freaking out about unregistered structs AsJSON string Representation string } -func WrapEntryValue(value interface{}) ReportEntryValue { +func WrapEntryValue(value any) ReportEntryValue { return ReportEntryValue{ raw: value, } } -func (rev ReportEntryValue) GetRawValue() interface{} { +func (rev ReportEntryValue) GetRawValue() any { return rev.raw } @@ -118,7 +118,7 @@ func (entry ReportEntry) StringRepresentation() string { // If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be // a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON // field yourself. 
-func (entry ReportEntry) GetRawValue() interface{} { +func (entry ReportEntry) GetRawValue() any { return entry.Value.GetRawValue() } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index aae69b04..ddcbec1b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -3,13 +3,21 @@ package types import ( "encoding/json" "fmt" + "os" "sort" "strings" "time" ) const GINKGO_FOCUS_EXIT_CODE = 197 -const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +func init() { + if os.Getenv("GINKGO_TIME_FORMAT") != "" { + GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT") + } +} // Report captures information about a Ginkgo test run type Report struct { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index a37f3082..158ac2fd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.13.0" +const VERSION = "2.23.4" diff --git a/vendor/github.com/quic-go/quic-go/.gitignore b/vendor/github.com/quic-go/quic-go/.gitignore index 3cc06f24..b454729d 100644 --- a/vendor/github.com/quic-go/quic-go/.gitignore +++ b/vendor/github.com/quic-go/quic-go/.gitignore @@ -4,6 +4,7 @@ main mockgen_tmp.go *.qtr *.qlog +*.sqlog *.txt race.[0-9]* diff --git a/vendor/github.com/quic-go/quic-go/.golangci.yml b/vendor/github.com/quic-go/quic-go/.golangci.yml index 7c4b71c0..656fa76d 100644 --- a/vendor/github.com/quic-go/quic-go/.golangci.yml +++ b/vendor/github.com/quic-go/quic-go/.golangci.yml @@ -1,45 +1,85 @@ -linters-settings: - misspell: - ignore-words: - - ect - depguard: - rules: - quicvarint: - list-mode: strict - files: - - "**/github.com/quic-go/quic-go/quicvarint/*" - - "!$test" - allow: - - $gostd - +version: "2" linters: - disable-all: true + default: none enable: - asciicheck + - copyloopvar - depguard - exhaustive - - exportloopref - - goimports - - gofmt # redundant, since gofmt *should* be a no-op after gofumpt - - gofumpt - - gosimple - govet - ineffassign - misspell - prealloc - staticcheck - - stylecheck - unconvert - unparam - unused - -issues: - exclude-files: - - internal/handshake/cipher_suite.go - exclude-rules: - - path: internal/qtls - linters: - - depguard - - path: _test\.go - linters: - - exhaustive + settings: + depguard: + rules: + random: + deny: + - pkg: "math/rand$" + desc: use math/rand/v2 + - pkg: "golang.org/x/exp/rand" + desc: use math/rand/v2 + quicvarint: + list-mode: strict + files: + - '**/github.com/quic-go/quic-go/quicvarint/*' + - '!$test' + allow: + - $gostd + rsa: + list-mode: original + deny: + - pkg: crypto/rsa + desc: "use crypto/ed25519 instead" + misspell: + ignore-rules: + - ect + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - depguard + path: internal/qtls + - linters: + - exhaustive + - prealloc + - unparam + path: _test\.go + - linters: + - staticcheck + path: _test\.go + text: 'SA1029:' # inappropriate key in call to context.WithValue + # WebTransport still relies on the ConnectionTracingID and ConnectionTracingKey. + # See https://github.com/quic-go/quic-go/issues/4405 for more details. 
+ - linters: + - staticcheck + paths: + - http3/ + - integrationtests/self/http_test.go + text: 'SA1019:.+quic\.ConnectionTracing(ID|Key)' + paths: + - internal/handshake/cipher_suite.go + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - gofumpt + - goimports + exclusions: + generated: lax + paths: + - internal/handshake/cipher_suite.go + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/quic-go/quic-go/README.md b/vendor/github.com/quic-go/quic-go/README.md index 94823d99..ccc9e213 100644 --- a/vendor/github.com/quic-go/quic-go/README.md +++ b/vendor/github.com/quic-go/quic-go/README.md @@ -9,7 +9,8 @@ quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go. It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) and HTTP Datagrams ([RFC 9297](https://datatracker.ietf.org/doc/html/rfc9297)). -In addition to these base RFCs, it also implements the following RFCs: +In addition to these base RFCs, it also implements the following RFCs: + * Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) * Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)) * QUIC Version 2 ([RFC 9369](https://datatracker.ietf.org/doc/html/rfc9369)) @@ -33,6 +34,7 @@ Detailed documentation can be found on [quic-go.net](https://quic-go.net/docs/). | [Hysteria](https://github.com/apernet/hysteria) | A powerful, lightning fast and censorship resistant proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/apernet/hysteria?style=flat-square) | | [Mercure](https://github.com/dunglas/mercure) | An open, easy, fast, reliable and battery-efficient solution for real-time communications | ![GitHub Repo stars](https://img.shields.io/github/stars/dunglas/mercure?style=flat-square) | | [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. 
| ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) | +| [reverst](https://github.com/flipt-io/reverst) | Reverse Tunnels in Go over HTTP/3 and QUIC | ![GitHub Repo stars](https://img.shields.io/github/stars/flipt-io/reverst?style=flat-square) | | [RoadRunner](https://github.com/roadrunner-server/roadrunner) | High-performance PHP application server, process manager written in Go and powered with plugins | ![GitHub Repo stars](https://img.shields.io/github/stars/roadrunner-server/roadrunner?style=flat-square) | | [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) | | [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) | diff --git a/vendor/github.com/quic-go/quic-go/client.go b/vendor/github.com/quic-go/quic-go/client.go index 1c5654f6..26cd8358 100644 --- a/vendor/github.com/quic-go/quic-go/client.go +++ b/vendor/github.com/quic-go/quic-go/client.go @@ -7,45 +7,15 @@ import ( "net" "github.com/quic-go/quic-go/internal/protocol" - "github.com/quic-go/quic-go/internal/utils" - "github.com/quic-go/quic-go/logging" ) -type client struct { - sendConn sendConn - - use0RTT bool - - packetHandlers packetHandlerManager - onClose func() - - tlsConf *tls.Config - config *Config - - connIDGenerator ConnectionIDGenerator - srcConnID protocol.ConnectionID - destConnID protocol.ConnectionID - - initialPacketNumber protocol.PacketNumber - hasNegotiatedVersion bool - version protocol.Version - - handshakeChan chan struct{} - - conn quicConn - - tracer *logging.ConnectionTracer - tracingID ConnectionTracingID - logger utils.Logger -} - // make it possible to mock connection ID for initial generation in the tests var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial // DialAddr establishes a new QUIC connection to a server. // It resolves the address, and then creates a new UDP connection to dial the QUIC server. // When the QUIC connection is closed, this UDP connection is closed. -// See Dial for more details. +// See [Dial] for more details. func DialAddr(ctx context.Context, addr string, tlsConf *tls.Config, conf *Config) (Connection, error) { udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) if err != nil { @@ -63,7 +33,7 @@ func DialAddr(ctx context.Context, addr string, tlsConf *tls.Config, conf *Confi } // DialAddrEarly establishes a new 0-RTT QUIC connection to a server. -// See DialAddr for more details. +// See [DialAddr] for more details. func DialAddrEarly(ctx context.Context, addr string, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) { udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) if err != nil { @@ -86,7 +56,7 @@ func DialAddrEarly(ctx context.Context, addr string, tlsConf *tls.Config, conf * } // DialEarly establishes a new 0-RTT QUIC connection to a server using a net.PacketConn. -// See Dial for more details. +// See [Dial] for more details. 
func DialEarly(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) { dl, err := setupTransport(c, tlsConf, false) if err != nil { @@ -101,12 +71,12 @@ func DialEarly(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tl } // Dial establishes a new QUIC connection to a server using a net.PacketConn. -// If the PacketConn satisfies the OOBCapablePacketConn interface (as a net.UDPConn does), +// If the PacketConn satisfies the [OOBCapablePacketConn] interface (as a [net.UDPConn] does), // ECN and packet info support will be enabled. In this case, ReadMsgUDP and WriteMsgUDP // will be used instead of ReadFrom and WriteTo to read/write packets. // The tls.Config must define an application protocol (using NextProtos). // -// This is a convenience function. More advanced use cases should instantiate a Transport, +// This is a convenience function. More advanced use cases should instantiate a [Transport], // which offers configuration options for a more fine-grained control of the connection establishment, // including reusing the underlying UDP socket for multiple QUIC connections. func Dial(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tls.Config, conf *Config) (Connection, error) { @@ -132,120 +102,3 @@ func setupTransport(c net.PacketConn, tlsConf *tls.Config, createdPacketConn boo isSingleUse: true, }, nil } - -func dial( - ctx context.Context, - conn sendConn, - connIDGenerator ConnectionIDGenerator, - packetHandlers packetHandlerManager, - tlsConf *tls.Config, - config *Config, - onClose func(), - use0RTT bool, -) (quicConn, error) { - c, err := newClient(conn, connIDGenerator, config, tlsConf, onClose, use0RTT) - if err != nil { - return nil, err - } - c.packetHandlers = packetHandlers - - c.tracingID = nextConnTracingID() - if c.config.Tracer != nil { - c.tracer = c.config.Tracer(context.WithValue(ctx, ConnectionTracingKey, c.tracingID), protocol.PerspectiveClient, c.destConnID) - } - if c.tracer != nil && c.tracer.StartedConnection != nil { - c.tracer.StartedConnection(c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID) - } - if err := c.dial(ctx); err != nil { - return nil, err - } - return c.conn, nil -} - -func newClient(sendConn sendConn, connIDGenerator ConnectionIDGenerator, config *Config, tlsConf *tls.Config, onClose func(), use0RTT bool) (*client, error) { - srcConnID, err := connIDGenerator.GenerateConnectionID() - if err != nil { - return nil, err - } - destConnID, err := generateConnectionIDForInitial() - if err != nil { - return nil, err - } - c := &client{ - connIDGenerator: connIDGenerator, - srcConnID: srcConnID, - destConnID: destConnID, - sendConn: sendConn, - use0RTT: use0RTT, - onClose: onClose, - tlsConf: tlsConf, - config: config, - version: config.Versions[0], - handshakeChan: make(chan struct{}), - logger: utils.DefaultLogger.WithPrefix("client"), - } - return c, nil -} - -func (c *client) dial(ctx context.Context) error { - c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID, c.version) - - c.conn = newClientConnection( - context.WithValue(context.WithoutCancel(ctx), ConnectionTracingKey, c.tracingID), - c.sendConn, - c.packetHandlers, - c.destConnID, - c.srcConnID, - c.connIDGenerator, - c.config, - c.tlsConf, - c.initialPacketNumber, - c.use0RTT, - c.hasNegotiatedVersion, - 
c.tracer, - c.logger, - c.version, - ) - c.packetHandlers.Add(c.srcConnID, c.conn) - - errorChan := make(chan error, 1) - recreateChan := make(chan errCloseForRecreating) - go func() { - err := c.conn.run() - var recreateErr *errCloseForRecreating - if errors.As(err, &recreateErr) { - recreateChan <- *recreateErr - return - } - if c.onClose != nil { - c.onClose() - } - errorChan <- err // returns as soon as the connection is closed - }() - - // only set when we're using 0-RTT - // Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever. - var earlyConnChan <-chan struct{} - if c.use0RTT { - earlyConnChan = c.conn.earlyConnReady() - } - - select { - case <-ctx.Done(): - c.conn.destroy(nil) - return context.Cause(ctx) - case err := <-errorChan: - return err - case recreateErr := <-recreateChan: - c.initialPacketNumber = recreateErr.nextPacketNumber - c.version = recreateErr.nextVersion - c.hasNegotiatedVersion = true - return c.dial(ctx) - case <-earlyConnChan: - // ready to send 0-RTT data - return nil - case <-c.conn.HandshakeComplete(): - // handshake successfully completed - return nil - } -} diff --git a/vendor/github.com/quic-go/quic-go/closed_conn.go b/vendor/github.com/quic-go/quic-go/closed_conn.go index 83338532..6486ea65 100644 --- a/vendor/github.com/quic-go/quic-go/closed_conn.go +++ b/vendor/github.com/quic-go/quic-go/closed_conn.go @@ -3,6 +3,7 @@ package quic import ( "math/bits" "net" + "sync/atomic" "github.com/quic-go/quic-go/internal/utils" ) @@ -11,7 +12,7 @@ import ( // When receiving packets for such a connection, we need to retransmit the packet containing the CONNECTION_CLOSE frame, // with an exponential backoff. type closedLocalConn struct { - counter uint32 + counter atomic.Uint32 logger utils.Logger sendPacket func(net.Addr, packetInfo) @@ -28,13 +29,13 @@ func newClosedLocalConn(sendPacket func(net.Addr, packetInfo), logger utils.Logg } func (c *closedLocalConn) handlePacket(p receivedPacket) { - c.counter++ + n := c.counter.Add(1) // exponential backoff // only send a CONNECTION_CLOSE for the 1st, 2nd, 4th, 8th, 16th, ... packet arriving - if bits.OnesCount32(c.counter) != 1 { + if bits.OnesCount32(n) != 1 { return } - c.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", c.counter) + c.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", n) c.sendPacket(p.remoteAddr, p.info) } diff --git a/vendor/github.com/quic-go/quic-go/codecov.yml b/vendor/github.com/quic-go/quic-go/codecov.yml index 59e4b58f..77e47fbe 100644 --- a/vendor/github.com/quic-go/quic-go/codecov.yml +++ b/vendor/github.com/quic-go/quic-go/codecov.yml @@ -6,6 +6,8 @@ coverage: - internal/handshake/cipher_suite.go - internal/utils/linkedlist/linkedlist.go - internal/testdata + - logging/connection_tracer_multiplexer.go + - logging/tracer_multiplexer.go - testutils/ - fuzzing/ - metrics/ diff --git a/vendor/github.com/quic-go/quic-go/config.go b/vendor/github.com/quic-go/quic-go/config.go index d42bdc1c..540a3240 100644 --- a/vendor/github.com/quic-go/quic-go/config.go +++ b/vendor/github.com/quic-go/quic-go/config.go @@ -8,7 +8,7 @@ import ( "github.com/quic-go/quic-go/quicvarint" ) -// Clone clones a Config +// Clone clones a Config. 
func (c *Config) Clone() *Config { copy := *c return &copy diff --git a/vendor/github.com/quic-go/quic-go/conn_id_generator.go b/vendor/github.com/quic-go/quic-go/conn_id_generator.go index d7be6540..e05fbd7f 100644 --- a/vendor/github.com/quic-go/quic-go/conn_id_generator.go +++ b/vendor/github.com/quic-go/quic-go/conn_id_generator.go @@ -8,41 +8,67 @@ import ( "github.com/quic-go/quic-go/internal/wire" ) +type connRunnerCallbacks struct { + AddConnectionID func(protocol.ConnectionID) + RemoveConnectionID func(protocol.ConnectionID) + RetireConnectionID func(protocol.ConnectionID) + ReplaceWithClosed func([]protocol.ConnectionID, []byte) +} + +type connRunners map[transportID]connRunnerCallbacks + +func (cr connRunners) AddConnectionID(id protocol.ConnectionID) { + for _, c := range cr { + c.AddConnectionID(id) + } +} + +func (cr connRunners) RemoveConnectionID(id protocol.ConnectionID) { + for _, c := range cr { + c.RemoveConnectionID(id) + } +} + +func (cr connRunners) RetireConnectionID(id protocol.ConnectionID) { + for _, c := range cr { + c.RetireConnectionID(id) + } +} + +func (cr connRunners) ReplaceWithClosed(ids []protocol.ConnectionID, b []byte) { + for _, c := range cr { + c.ReplaceWithClosed(ids, b) + } +} + type connIDGenerator struct { - generator ConnectionIDGenerator - highestSeq uint64 + generator ConnectionIDGenerator + highestSeq uint64 + connRunners connRunners activeSrcConnIDs map[uint64]protocol.ConnectionID initialClientDestConnID *protocol.ConnectionID // nil for the client - addConnectionID func(protocol.ConnectionID) - getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken - removeConnectionID func(protocol.ConnectionID) - retireConnectionID func(protocol.ConnectionID) - replaceWithClosed func([]protocol.ConnectionID, []byte) - queueControlFrame func(wire.Frame) + statelessResetter *statelessResetter + + queueControlFrame func(wire.Frame) } func newConnIDGenerator( + tID transportID, initialConnectionID protocol.ConnectionID, initialClientDestConnID *protocol.ConnectionID, // nil for the client - addConnectionID func(protocol.ConnectionID), - getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken, - removeConnectionID func(protocol.ConnectionID), - retireConnectionID func(protocol.ConnectionID), - replaceWithClosed func([]protocol.ConnectionID, []byte), + statelessResetter *statelessResetter, + connRunner connRunnerCallbacks, queueControlFrame func(wire.Frame), generator ConnectionIDGenerator, ) *connIDGenerator { m := &connIDGenerator{ - generator: generator, - activeSrcConnIDs: make(map[uint64]protocol.ConnectionID), - addConnectionID: addConnectionID, - getStatelessResetToken: getStatelessResetToken, - removeConnectionID: removeConnectionID, - retireConnectionID: retireConnectionID, - replaceWithClosed: replaceWithClosed, - queueControlFrame: queueControlFrame, + generator: generator, + activeSrcConnIDs: make(map[uint64]protocol.ConnectionID), + statelessResetter: statelessResetter, + connRunners: map[transportID]connRunnerCallbacks{tID: connRunner}, + queueControlFrame: queueControlFrame, } m.activeSrcConnIDs[0] = initialConnectionID m.initialClientDestConnID = initialClientDestConnID @@ -85,7 +111,7 @@ func (m *connIDGenerator) Retire(seq uint64, sentWithDestConnID protocol.Connect ErrorMessage: fmt.Sprintf("retired connection ID %d (%s), which was used as the Destination Connection ID on this packet", seq, connID), } } - m.retireConnectionID(connID) + m.connRunners.RetireConnectionID(connID)
delete(m.activeSrcConnIDs, seq) // Don't issue a replacement for the initial connection ID. if seq == 0 { @@ -100,11 +126,11 @@ func (m *connIDGenerator) issueNewConnID() error { return err } m.activeSrcConnIDs[m.highestSeq+1] = connID - m.addConnectionID(connID) + m.connRunners.AddConnectionID(connID) m.queueControlFrame(&wire.NewConnectionIDFrame{ SequenceNumber: m.highestSeq + 1, ConnectionID: connID, - StatelessResetToken: m.getStatelessResetToken(connID), + StatelessResetToken: m.statelessResetter.GetStatelessResetToken(connID), }) m.highestSeq++ return nil @@ -112,17 +138,17 @@ func (m *connIDGenerator) issueNewConnID() error { func (m *connIDGenerator) SetHandshakeComplete() { if m.initialClientDestConnID != nil { - m.retireConnectionID(*m.initialClientDestConnID) + m.connRunners.RetireConnectionID(*m.initialClientDestConnID) m.initialClientDestConnID = nil } } func (m *connIDGenerator) RemoveAll() { if m.initialClientDestConnID != nil { - m.removeConnectionID(*m.initialClientDestConnID) + m.connRunners.RemoveConnectionID(*m.initialClientDestConnID) } for _, connID := range m.activeSrcConnIDs { - m.removeConnectionID(connID) + m.connRunners.RemoveConnectionID(connID) } } @@ -134,5 +160,20 @@ func (m *connIDGenerator) ReplaceWithClosed(connClose []byte) { for _, connID := range m.activeSrcConnIDs { connIDs = append(connIDs, connID) } - m.replaceWithClosed(connIDs, connClose) + m.connRunners.ReplaceWithClosed(connIDs, connClose) +} + +func (m *connIDGenerator) AddConnRunner(id transportID, r connRunnerCallbacks) { + // The transport might have already been added earlier. + // This happens if the application migrates back to and old path. + if _, ok := m.connRunners[id]; ok { + return + } + m.connRunners[id] = r + if m.initialClientDestConnID != nil { + r.AddConnectionID(*m.initialClientDestConnID) + } + for _, connID := range m.activeSrcConnIDs { + r.AddConnectionID(connID) + } } diff --git a/vendor/github.com/quic-go/quic-go/conn_id_manager.go b/vendor/github.com/quic-go/quic-go/conn_id_manager.go index 4aa3f749..5513b645 100644 --- a/vendor/github.com/quic-go/quic-go/conn_id_manager.go +++ b/vendor/github.com/quic-go/quic-go/conn_id_manager.go @@ -2,11 +2,11 @@ package quic import ( "fmt" + "slices" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" "github.com/quic-go/quic-go/internal/utils" - list "github.com/quic-go/quic-go/internal/utils/linkedlist" "github.com/quic-go/quic-go/internal/wire" ) @@ -17,7 +17,10 @@ type newConnID struct { } type connIDManager struct { - queue list.List[newConnID] + queue []newConnID + + highestProbingID uint64 + pathProbing map[pathID]newConnID // initialized lazily handshakeComplete bool activeSequenceNumber uint64 @@ -35,6 +38,8 @@ type connIDManager struct { addStatelessResetToken func(protocol.StatelessResetToken) removeStatelessResetToken func(protocol.StatelessResetToken) queueControlFrame func(wire.Frame) + + closed bool } func newConnIDManager( @@ -48,6 +53,7 @@ func newConnIDManager( addStatelessResetToken: addStatelessResetToken, removeStatelessResetToken: removeStatelessResetToken, queueControlFrame: queueControlFrame, + queue: make([]newConnID, 0, protocol.MaxActiveConnectionIDs), } } @@ -59,36 +65,51 @@ func (h *connIDManager) Add(f *wire.NewConnectionIDFrame) error { if err := h.add(f); err != nil { return err } - if h.queue.Len() >= protocol.MaxActiveConnectionIDs { + if len(h.queue) >= protocol.MaxActiveConnectionIDs { return &qerr.TransportError{ErrorCode: qerr.ConnectionIDLimitError} } return 
nil } func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error { + if h.activeConnectionID.Len() == 0 { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "received NEW_CONNECTION_ID frame but zero-length connection IDs are in use", + } + } // If the NEW_CONNECTION_ID frame is reordered, such that its sequence number is smaller than the currently active // connection ID or if it was already retired, send the RETIRE_CONNECTION_ID frame immediately. - if f.SequenceNumber < h.activeSequenceNumber || f.SequenceNumber < h.highestRetired { + if f.SequenceNumber < max(h.activeSequenceNumber, h.highestProbingID) || f.SequenceNumber < h.highestRetired { h.queueControlFrame(&wire.RetireConnectionIDFrame{ SequenceNumber: f.SequenceNumber, }) return nil } + if f.RetirePriorTo != 0 && h.pathProbing != nil { + for id, entry := range h.pathProbing { + if entry.SequenceNumber < f.RetirePriorTo { + h.queueControlFrame(&wire.RetireConnectionIDFrame{ + SequenceNumber: entry.SequenceNumber, + }) + h.removeStatelessResetToken(entry.StatelessResetToken) + delete(h.pathProbing, id) + } + } + } // Retire elements in the queue. // Doesn't retire the active connection ID. if f.RetirePriorTo > h.highestRetired { - var next *list.Element[newConnID] - for el := h.queue.Front(); el != nil; el = next { - if el.Value.SequenceNumber >= f.RetirePriorTo { - break + var newQueue []newConnID + for _, entry := range h.queue { + if entry.SequenceNumber >= f.RetirePriorTo { + newQueue = append(newQueue, entry) + } else { + h.queueControlFrame(&wire.RetireConnectionIDFrame{SequenceNumber: entry.SequenceNumber}) } - next = el.Next() - h.queueControlFrame(&wire.RetireConnectionIDFrame{ - SequenceNumber: el.Value.SequenceNumber, - }) - h.queue.Remove(el) } + h.queue = newQueue h.highestRetired = f.RetirePriorTo } @@ -109,39 +130,43 @@ func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error { } func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID, resetToken protocol.StatelessResetToken) error { - // insert a new element at the end - if h.queue.Len() == 0 || h.queue.Back().Value.SequenceNumber < seq { - h.queue.PushBack(newConnID{ + // fast path: add to the end of the queue + if len(h.queue) == 0 || h.queue[len(h.queue)-1].SequenceNumber < seq { + h.queue = append(h.queue, newConnID{ SequenceNumber: seq, ConnectionID: connID, StatelessResetToken: resetToken, }) return nil } - // insert a new element somewhere in the middle - for el := h.queue.Front(); el != nil; el = el.Next() { - if el.Value.SequenceNumber == seq { - if el.Value.ConnectionID != connID { + + // slow path: insert in the middle + for i, entry := range h.queue { + if entry.SequenceNumber == seq { + if entry.ConnectionID != connID { return fmt.Errorf("received conflicting connection IDs for sequence number %d", seq) } - if el.Value.StatelessResetToken != resetToken { + if entry.StatelessResetToken != resetToken { return fmt.Errorf("received conflicting stateless reset tokens for sequence number %d", seq) } - break + return nil } - if el.Value.SequenceNumber > seq { - h.queue.InsertBefore(newConnID{ + + // insert at the correct position to maintain sorted order + if entry.SequenceNumber > seq { + h.queue = slices.Insert(h.queue, i, newConnID{ SequenceNumber: seq, ConnectionID: connID, StatelessResetToken: resetToken, - }, el) - break + }) + return nil } } - return nil + return nil // unreachable } func (h *connIDManager) updateConnectionID() { + h.assertNotClosed() 
h.queueControlFrame(&wire.RetireConnectionIDFrame{ SequenceNumber: h.activeSequenceNumber, }) @@ -150,7 +175,8 @@ func (h *connIDManager) updateConnectionID() { h.removeStatelessResetToken(*h.activeStatelessResetToken) } - front := h.queue.Remove(h.queue.Front()) + front := h.queue[0] + h.queue = h.queue[1:] h.activeSequenceNumber = front.SequenceNumber h.activeConnectionID = front.ConnectionID h.activeStatelessResetToken = &front.StatelessResetToken @@ -160,9 +186,15 @@ func (h *connIDManager) updateConnectionID() { } func (h *connIDManager) Close() { + h.closed = true if h.activeStatelessResetToken != nil { h.removeStatelessResetToken(*h.activeStatelessResetToken) } + if h.pathProbing != nil { + for _, entry := range h.pathProbing { + h.removeStatelessResetToken(entry.StatelessResetToken) + } + } } // is called when the server performs a Retry @@ -176,6 +208,7 @@ func (h *connIDManager) ChangeInitialConnID(newConnID protocol.ConnectionID) { // is called when the server provides a stateless reset token in the transport parameters func (h *connIDManager) SetStatelessResetToken(token protocol.StatelessResetToken) { + h.assertNotClosed() if h.activeSequenceNumber != 0 { panic("expected first connection ID to have sequence number 0") } @@ -192,17 +225,18 @@ func (h *connIDManager) shouldUpdateConnID() bool { return false } // initiate the first change as early as possible (after handshake completion) - if h.queue.Len() > 0 && h.activeSequenceNumber == 0 { + if len(h.queue) > 0 && h.activeSequenceNumber == 0 { return true } // For later changes, only change if // 1. The queue of connection IDs is filled more than 50%. // 2. We sent at least PacketsPerConnectionID packets - return 2*h.queue.Len() >= protocol.MaxActiveConnectionIDs && + return 2*len(h.queue) >= protocol.MaxActiveConnectionIDs && h.packetsSinceLastChange >= h.packetsPerConnectionID } func (h *connIDManager) Get() protocol.ConnectionID { + h.assertNotClosed() if h.shouldUpdateConnID() { h.updateConnectionID() } @@ -212,3 +246,76 @@ func (h *connIDManager) Get() protocol.ConnectionID { func (h *connIDManager) SetHandshakeComplete() { h.handshakeComplete = true } + +// GetConnIDForPath retrieves a connection ID for a new path (i.e. not the active one). +// Once a connection ID is allocated for a path, it cannot be used for a different path. +// When called with the same pathID, it will return the same connection ID, +// unless the peer requested that this connection ID be retired. 
+func (h *connIDManager) GetConnIDForPath(id pathID) (protocol.ConnectionID, bool) { + h.assertNotClosed() + // if we're using zero-length connection IDs, we don't need to change the connection ID + if h.activeConnectionID.Len() == 0 { + return protocol.ConnectionID{}, true + } + + if h.pathProbing == nil { + h.pathProbing = make(map[pathID]newConnID) + } + entry, ok := h.pathProbing[id] + if ok { + return entry.ConnectionID, true + } + if len(h.queue) == 0 { + return protocol.ConnectionID{}, false + } + front := h.queue[0] + h.queue = h.queue[1:] + h.pathProbing[id] = front + h.highestProbingID = front.SequenceNumber + h.addStatelessResetToken(front.StatelessResetToken) + return front.ConnectionID, true +} + +func (h *connIDManager) RetireConnIDForPath(pathID pathID) { + h.assertNotClosed() + // if we're using zero-length connection IDs, we don't need to change the connection ID + if h.activeConnectionID.Len() == 0 { + return + } + + entry, ok := h.pathProbing[pathID] + if !ok { + return + } + h.queueControlFrame(&wire.RetireConnectionIDFrame{ + SequenceNumber: entry.SequenceNumber, + }) + h.removeStatelessResetToken(entry.StatelessResetToken) + delete(h.pathProbing, pathID) +} + +func (h *connIDManager) IsActiveStatelessResetToken(token protocol.StatelessResetToken) bool { + if h.activeStatelessResetToken != nil { + if *h.activeStatelessResetToken == token { + return true + } + } + if h.pathProbing != nil { + for _, entry := range h.pathProbing { + if entry.StatelessResetToken == token { + return true + } + } + } + return false +} + +// Using the connIDManager after it has been closed can have disastrous effects: +// If the connection ID is rotated, a new entry would be inserted into the packet handler map, +// leading to a memory leak of the connection struct. +// See https://github.com/quic-go/quic-go/pull/4852 for more details. 
+func (h *connIDManager) assertNotClosed() { + if h.closed { + panic("connection ID manager is closed") + } +} diff --git a/vendor/github.com/quic-go/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go index 0dbfa574..413266a5 100644 --- a/vendor/github.com/quic-go/quic-go/connection.go +++ b/vendor/github.com/quic-go/quic-go/connection.go @@ -18,24 +18,19 @@ import ( "github.com/quic-go/quic-go/internal/ackhandler" "github.com/quic-go/quic-go/internal/flowcontrol" "github.com/quic-go/quic-go/internal/handshake" - "github.com/quic-go/quic-go/internal/logutils" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/utils/ringbuffer" "github.com/quic-go/quic-go/internal/wire" "github.com/quic-go/quic-go/logging" ) type unpacker interface { - UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.Version) (*unpackedPacket, error) + UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error) UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) } -type streamGetter interface { - GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error) - GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error) -} - type streamManager interface { GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error) GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error) @@ -61,6 +56,7 @@ type cryptoStreamHandler interface { GetSessionTicket() ([]byte, error) NextEvent() handshake.Event DiscardInitialKeys() + HandleMessage([]byte, protocol.EncryptionLevel) error io.Closer ConnectionState() handshake.ConnectionState } @@ -92,7 +88,6 @@ func (p *receivedPacket) Clone() *receivedPacket { type connRunner interface { Add(protocol.ConnectionID, packetHandler) bool - GetStatelessResetToken(protocol.ConnectionID) protocol.StatelessResetToken Retire(protocol.ConnectionID) Remove(protocol.ConnectionID) ReplaceWithClosed([]protocol.ConnectionID, []byte) @@ -102,7 +97,6 @@ type connRunner interface { type closeError struct { err error - remote bool immediate bool } @@ -120,6 +114,8 @@ func nextConnTracingID() ConnectionTracingID { return ConnectionTracingID(connTr // A Connection is a QUIC connection type connection struct { + tr *Transport + // Destination connection ID used during the handshake. // Used to check source connection ID on incoming packets. 
handshakeDestConnID protocol.ConnectionID @@ -136,6 +132,11 @@ type connection struct { conn sendConn sendQueue sender + // lazily initialzed: most connections never migrate + pathManager *pathManager + largestRcvdAppData protocol.PacketNumber + pathManagerOutgoing atomic.Pointer[pathManagerOutgoing] + streamsMap streamManager connIDManager *connIDManager connIDGenerator *connIDGenerator @@ -146,8 +147,7 @@ type connection struct { sentPacketHandler ackhandler.SentPacketHandler receivedPacketHandler ackhandler.ReceivedPacketHandler retransmissionQueue *retransmissionQueue - framer framer - windowUpdateQueue *windowUpdateQueue + framer *framer connFlowController flowcontrol.ConnectionFlowController tokenStoreKey string // only set for the client tokenGenerator *handshake.TokenGenerator // only set for the server @@ -157,19 +157,21 @@ type connection struct { packer packer mtuDiscoverer mtuDiscoverer // initialized when the transport parameters are received - maxPayloadSizeEstimate atomic.Uint32 + currentMTUEstimate atomic.Uint32 - initialStream cryptoStream - handshakeStream cryptoStream - oneRTTStream cryptoStream // only set for the server + initialStream *cryptoStream + handshakeStream *cryptoStream + oneRTTStream *cryptoStream // only set for the server cryptoStreamHandler cryptoStreamHandler - receivedPackets chan receivedPacket - sendingScheduled chan struct{} + notifyReceivedPacket chan struct{} + sendingScheduled chan struct{} + receivedPacketMx sync.Mutex + receivedPackets ringbuffer.RingBuffer[receivedPacket] - closeOnce sync.Once // closeChan is used to notify the run loop that it should terminate - closeChan chan closeError + closeChan chan struct{} + closeErr atomic.Pointer[closeError] ctx context.Context ctxCancel context.CancelCauseFunc @@ -226,14 +228,14 @@ var newConnection = func( ctx context.Context, ctxCancel context.CancelCauseFunc, conn sendConn, - runner connRunner, + tr *Transport, origDestConnID protocol.ConnectionID, retrySrcConnID *protocol.ConnectionID, clientDestConnID protocol.ConnectionID, destConnID protocol.ConnectionID, srcConnID protocol.ConnectionID, connIDGenerator ConnectionIDGenerator, - statelessResetToken protocol.StatelessResetToken, + statelessResetter *statelessResetter, conf *Config, tlsConf *tls.Config, tokenGenerator *handshake.TokenGenerator, @@ -245,6 +247,7 @@ var newConnection = func( s := &connection{ ctx: ctx, ctxCancel: ctxCancel, + tr: tr, conn: conn, config: conf, handshakeDestConnID: destConnID, @@ -261,6 +264,7 @@ var newConnection = func( } else { s.logID = destConnID.String() } + runner := tr.connRunner() s.connIDManager = newConnIDManager( destConnID, func(token protocol.StatelessResetToken) { runner.AddResetToken(token, s) }, @@ -268,13 +272,16 @@ var newConnection = func( s.queueControlFrame, ) s.connIDGenerator = newConnIDGenerator( + tr.id(), srcConnID, &clientDestConnID, - func(connID protocol.ConnectionID) { runner.Add(connID, s) }, - runner.GetStatelessResetToken, - runner.Remove, - runner.Retire, - runner.ReplaceWithClosed, + statelessResetter, + connRunnerCallbacks{ + AddConnectionID: func(connID protocol.ConnectionID) { runner.Add(connID, s) }, + RemoveConnectionID: runner.Remove, + RetireConnectionID: runner.Retire, + ReplaceWithClosed: runner.ReplaceWithClosed, + }, s.queueControlFrame, connIDGenerator, ) @@ -289,7 +296,8 @@ var newConnection = func( s.tracer, s.logger, ) - s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize)))) + 
s.currentMTUEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize)))) + statelessResetToken := statelessResetter.GetStatelessResetToken(srcConnID) // Allow server to define custom MaxUDPPayloadSize maxUDPPayloadSize := protocol.MaxPacketBufferSize if maxPacketSize := os.Getenv("TUNNEL_MAX_QUIC_PACKET_SIZE"); maxPacketSize != "" { @@ -311,7 +319,6 @@ var newConnection = func( MaxAckDelay: protocol.MaxAckDelayInclGranularity, AckDelayExponent: protocol.AckDelayExponent, MaxUDPPayloadSize: protocol.ByteCount(maxUDPPayloadSize), - DisableActiveMigration: true, StatelessResetToken: &statelessResetToken, OriginalDestinationConnectionID: origDestConnID, // For interoperability with quic-go versions before May 2023, this value must be set to a value @@ -346,7 +353,7 @@ var newConnection = func( s.cryptoStreamHandler = cs s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective) s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) - s.cryptoStreamManager = newCryptoStreamManager(cs, s.initialStream, s.handshakeStream, s.oneRTTStream) + s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, s.oneRTTStream) return s } @@ -354,10 +361,11 @@ var newConnection = func( var newClientConnection = func( ctx context.Context, conn sendConn, - runner connRunner, + tr *Transport, destConnID protocol.ConnectionID, srcConnID protocol.ConnectionID, connIDGenerator ConnectionIDGenerator, + statelessResetter *statelessResetter, conf *Config, tlsConf *tls.Config, initialPacketNumber protocol.PacketNumber, @@ -368,6 +376,7 @@ var newClientConnection = func( v protocol.Version, ) quicConn { s := &connection{ + tr: tr, conn: conn, config: conf, origDestConnID: destConnID, @@ -380,6 +389,7 @@ var newClientConnection = func( versionNegotiated: hasNegotiatedVersion, version: v, } + runner := tr.connRunner() s.connIDManager = newConnIDManager( destConnID, func(token protocol.StatelessResetToken) { runner.AddResetToken(token, s) }, @@ -387,13 +397,16 @@ var newClientConnection = func( s.queueControlFrame, ) s.connIDGenerator = newConnIDGenerator( + tr.id(), srcConnID, nil, - func(connID protocol.ConnectionID) { runner.Add(connID, s) }, - runner.GetStatelessResetToken, - runner.Remove, - runner.Retire, - runner.ReplaceWithClosed, + statelessResetter, + connRunnerCallbacks{ + AddConnectionID: func(connID protocol.ConnectionID) { runner.Add(connID, s) }, + RemoveConnectionID: runner.Remove, + RetireConnectionID: runner.Retire, + ReplaceWithClosed: runner.ReplaceWithClosed, + }, s.queueControlFrame, connIDGenerator, ) @@ -409,7 +422,7 @@ var newClientConnection = func( s.tracer, s.logger, ) - s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize)))) + s.currentMTUEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize)))) oneRTTStream := newCryptoStream() params := &wire.TransportParameters{ InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow), @@ -422,7 +435,6 @@ var newClientConnection = func( MaxAckDelay: protocol.MaxAckDelayInclGranularity, MaxUDPPayloadSize: protocol.MaxPacketBufferSize, AckDelayExponent: protocol.AckDelayExponent, - DisableActiveMigration: true, // For interoperability with quic-go versions before May 2023, this value must be set to a value // different from 
protocol.DefaultActiveConnectionIDLimit. // If set to the default value, it will be omitted from the transport parameters, which will make @@ -450,7 +462,7 @@ var newClientConnection = func( s.version, ) s.cryptoStreamHandler = cs - s.cryptoStreamManager = newCryptoStreamManager(cs, s.initialStream, s.handshakeStream, oneRTTStream) + s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, oneRTTStream) s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective) if len(tlsConf.ServerName) > 0 { @@ -467,6 +479,7 @@ var newClientConnection = func( } func (s *connection) preSetup() { + s.largestRcvdAppData = protocol.InvalidPacketNumber s.initialStream = newCryptoStream() s.handshakeStream = newCryptoStream() s.sendQueue = newSendQueue(s.conn) @@ -476,7 +489,6 @@ func (s *connection) preSetup() { s.connFlowController = flowcontrol.NewConnectionFlowController( protocol.ByteCount(s.config.InitialConnectionReceiveWindow), protocol.ByteCount(s.config.MaxConnectionReceiveWindow), - s.onHasConnectionWindowUpdate, func(size protocol.ByteCount) bool { if s.config.AllowConnectionWindowIncrease == nil { return true @@ -490,14 +502,16 @@ func (s *connection) preSetup() { s.streamsMap = newStreamsMap( s.ctx, s, + s.queueControlFrame, s.newFlowController, uint64(s.config.MaxIncomingStreams), uint64(s.config.MaxIncomingUniStreams), s.perspective, ) - s.framer = newFramer(s.streamsMap) - s.receivedPackets = make(chan receivedPacket, protocol.MaxConnUnprocessedPackets) - s.closeChan = make(chan closeError, 1) + s.framer = newFramer(s.connFlowController) + s.receivedPackets.Init(8) + s.notifyReceivedPacket = make(chan struct{}, 1) + s.closeChan = make(chan struct{}, 1) s.sendingScheduled = make(chan struct{}, 1) s.handshakeCompleteChan = make(chan struct{}) @@ -505,22 +519,32 @@ func (s *connection) preSetup() { s.lastPacketReceivedTime = now s.creationTime = now - s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.connFlowController, s.framer.QueueControlFrame) s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger) s.connState.Version = s.version } // run the connection main loop -func (s *connection) run() error { - var closeErr closeError - defer func() { s.ctxCancel(closeErr.err) }() +func (s *connection) run() (err error) { + defer func() { s.ctxCancel(err) }() + + defer func() { + // drain queued packets that will never be processed + s.receivedPacketMx.Lock() + defer s.receivedPacketMx.Unlock() + + for !s.receivedPackets.Empty() { + p := s.receivedPackets.PopFront() + p.buffer.Decrement() + p.buffer.MaybeRelease() + } + }() s.timer = *newTimer() if err := s.cryptoStreamHandler.StartHandshake(s.ctx); err != nil { return err } - if err := s.handleHandshakeEvents(); err != nil { + if err := s.handleHandshakeEvents(time.Now()); err != nil { return err } go func() { @@ -538,91 +562,88 @@ func (s *connection) run() error { runLoop: for { if s.framer.QueuedTooManyControlFrames() { - s.closeLocal(&qerr.TransportError{ErrorCode: InternalError}) + s.setCloseError(&closeError{err: &qerr.TransportError{ErrorCode: InternalError}}) + break runLoop } // Close immediately if requested select { - case closeErr = <-s.closeChan: + case <-s.closeChan: break runLoop default: } - s.maybeResetTimer() + // no need to set a timer if we can send packets immediately + if s.pacingDeadline 
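// --- Editor's note: illustrative sketch, not part of the diff. ---
// The bounded receivedPackets channel is replaced by a mutex-protected queue
// (quic-go uses its own ringbuffer package) plus a 1-slot notification channel:
// producers enqueue and signal, the run loop wakes once and drains in batches.
// Hypothetical, simplified types showing only that pattern.
package main

import (
	"fmt"
	"sync"
)

type packetQueue struct {
	mx     sync.Mutex
	items  []int
	notify chan struct{} // capacity 1: "there is work"
}

func newPacketQueue() *packetQueue { return &packetQueue{notify: make(chan struct{}, 1)} }

func (q *packetQueue) push(v int) {
	q.mx.Lock()
	q.items = append(q.items, v)
	q.mx.Unlock()
	select { // non-blocking: one pending signal is enough
	case q.notify <- struct{}{}:
	default:
	}
}

func (q *packetQueue) drain() []int {
	q.mx.Lock()
	defer q.mx.Unlock()
	out := q.items
	q.items = nil
	return out
}

func main() {
	q := newPacketQueue()
	q.push(1)
	q.push(2)
	<-q.notify             // run loop wakes up once...
	fmt.Println(q.drain()) // ...and drains everything queued so far: [1 2]
}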
!= deadlineSendImmediately { + s.maybeResetTimer() + } - var processedUndecryptablePacket bool + // 1st: handle undecryptable packets, if any. + // This can only occur before completion of the handshake. if len(s.undecryptablePacketsToProcess) > 0 { + var processedUndecryptablePacket bool queue := s.undecryptablePacketsToProcess s.undecryptablePacketsToProcess = nil for _, p := range queue { - if processed := s.handlePacketImpl(p); processed { + processed, err := s.handleOnePacket(p) + if err != nil { + s.setCloseError(&closeError{err: err}) + break runLoop + } + if processed { processedUndecryptablePacket = true } - // Don't set timers and send packets if the packet made us close the connection. - select { - case closeErr = <-s.closeChan: - break runLoop - default: - } + } + if processedUndecryptablePacket { + // if we processed any undecryptable packets, jump to the resetting of the timers directly + continue } } - // If we processed any undecryptable packets, jump to the resetting of the timers directly. - if !processedUndecryptablePacket { + + // 2nd: receive packets. + processed, err := s.handlePackets() // don't check receivedPackets.Len() in the run loop to avoid locking the mutex + if err != nil { + s.setCloseError(&closeError{err: err}) + break runLoop + } + + // We don't need to wait for new events if: + // * we processed packets: we probably need to send an ACK, and potentially more data + // * the pacer allows us to send more packets immediately + shouldProceedImmediately := sendQueueAvailable == nil && (processed || s.pacingDeadline.Equal(deadlineSendImmediately)) + if !shouldProceedImmediately { + // 3rd: wait for something to happen: + // * closing of the connection + // * timer firing + // * sending scheduled + // * send queue available + // * received packets select { - case closeErr = <-s.closeChan: + case <-s.closeChan: break runLoop case <-s.timer.Chan(): s.timer.SetRead() - // We do all the interesting stuff after the switch statement, so - // nothing to see here. case <-s.sendingScheduled: - // We do all the interesting stuff after the switch statement, so - // nothing to see here. case <-sendQueueAvailable: - case firstPacket := <-s.receivedPackets: - wasProcessed := s.handlePacketImpl(firstPacket) - // Don't set timers and send packets if the packet made us close the connection. - select { - case closeErr = <-s.closeChan: + case <-s.notifyReceivedPacket: + wasProcessed, err := s.handlePackets() + if err != nil { + s.setCloseError(&closeError{err: err}) break runLoop - default: } - if s.handshakeComplete { - // Now process all packets in the receivedPackets channel. - // Limit the number of packets to the length of the receivedPackets channel, - // so we eventually get a chance to send out an ACK when receiving a lot of packets. - numPackets := len(s.receivedPackets) - receiveLoop: - for i := 0; i < numPackets; i++ { - select { - case p := <-s.receivedPackets: - if processed := s.handlePacketImpl(p); processed { - wasProcessed = true - } - select { - case closeErr = <-s.closeChan: - break runLoop - default: - } - default: - break receiveLoop - } - } - } - // Only reset the timers if this packet was actually processed. - // This avoids modifying any state when handling undecryptable packets, - // which could be injected by an attacker. + // if we processed any undecryptable packets, jump to the resetting of the timers directly if !wasProcessed { continue } } } + // Check for loss detection timeout. 
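// --- Editor's note: illustrative sketch, not part of the diff. ---
// The reworked run loop only blocks on its select when nothing is immediately
// actionable: if the last pass processed packets (an ACK is likely due) or the
// pacer allows sending right away, it goes straight back to the send path.
// Hypothetical, simplified shape of that decision.
package main

import (
	"fmt"
	"time"
)

var deadlineSendImmediately = time.Time{} // zero value stands in for "send now"

func shouldProceedImmediately(sendQueueBlocked, processedPackets bool, pacingDeadline time.Time) bool {
	return !sendQueueBlocked && (processedPackets || pacingDeadline.Equal(deadlineSendImmediately))
}

func main() {
	fmt.Println(shouldProceedImmediately(false, true, time.Now().Add(time.Millisecond))) // true: just processed packets
	fmt.Println(shouldProceedImmediately(false, false, deadlineSendImmediately))         // true: pacer allows sending now
	fmt.Println(shouldProceedImmediately(true, true, deadlineSendImmediately))           // false: send queue is full, must wait
}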
+ // This could cause packets to be declared lost, and retransmissions to be enqueued. now := time.Now() if timeout := s.sentPacketHandler.GetLossDetectionTimeout(); !timeout.IsZero() && timeout.Before(now) { - // This could cause packets to be retransmitted. - // Check it before trying to send packets. - if err := s.sentPacketHandler.OnLossDetectionTimeout(); err != nil { - s.closeLocal(err) + if err := s.sentPacketHandler.OnLossDetectionTimeout(now); err != nil { + s.setCloseError(&closeError{err: err}) + break runLoop } } @@ -633,35 +654,56 @@ runLoop: s.keepAlivePingSent = true } else if !s.handshakeComplete && now.Sub(s.creationTime) >= s.config.handshakeTimeout() { s.destroyImpl(qerr.ErrHandshakeTimeout) - continue + break runLoop } else { idleTimeoutStartTime := s.idleTimeoutStartTime() if (!s.handshakeComplete && now.Sub(idleTimeoutStartTime) >= s.config.HandshakeIdleTimeout) || (s.handshakeComplete && now.After(s.nextIdleTimeoutTime())) { s.destroyImpl(qerr.ErrIdleTimeout) - continue + break runLoop + } + } + + if s.perspective == protocol.PerspectiveClient { + pm := s.pathManagerOutgoing.Load() + if pm != nil { + tr, ok := pm.ShouldSwitchPath() + if ok { + s.switchToNewPath(tr, now) + } } } if s.sendQueue.WouldBlock() { - // The send queue is still busy sending out packets. - // Wait until there's space to enqueue new packets. + // The send queue is still busy sending out packets. Wait until there's space to enqueue new packets. sendQueueAvailable = s.sendQueue.Available() + // Cancel the pacing timer, as we can't send any more packets until the send queue is available again. + s.pacingDeadline = time.Time{} continue } + + if s.closeErr.Load() != nil { + break runLoop + } + if err := s.triggerSending(now); err != nil { - s.closeLocal(err) + s.setCloseError(&closeError{err: err}) + break runLoop } if s.sendQueue.WouldBlock() { + // The send queue is still busy sending out packets. Wait until there's space to enqueue new packets. sendQueueAvailable = s.sendQueue.Available() + // Cancel the pacing timer, as we can't send any more packets until the send queue is available again. + s.pacingDeadline = time.Time{} } else { sendQueueAvailable = nil } } + closeErr := s.closeErr.Load() s.cryptoStreamHandler.Close() s.sendQueue.Close() // close the send queue before sending the CONNECTION_CLOSE - s.handleCloseError(&closeErr) + s.handleCloseError(closeErr) if s.tracer != nil && s.tracer.Close != nil { if e := (&errCloseForRecreating{}); !errors.As(closeErr.err, &e) { s.tracer.Close() @@ -708,7 +750,7 @@ func (s *connection) nextIdleTimeoutTime() time.Time { // Time when the next keep-alive packet should be sent. // It returns a zero time if no keep-alive should be sent. 
func (s *connection) nextKeepAliveTime() time.Time { - if s.config.KeepAlivePeriod == 0 || s.keepAlivePingSent || !s.firstAckElicitingPacketAfterIdleSentTime.IsZero() { + if s.config.KeepAlivePeriod == 0 || s.keepAlivePingSent { return time.Time{} } keepAliveInterval := max(s.keepAliveInterval, s.rttStats.PTO(true)*3/2) @@ -718,10 +760,10 @@ func (s *connection) nextKeepAliveTime() time.Time { func (s *connection) maybeResetTimer() { var deadline time.Time if !s.handshakeComplete { - deadline = utils.MinTime( - s.creationTime.Add(s.config.handshakeTimeout()), - s.idleTimeoutStartTime().Add(s.config.HandshakeIdleTimeout), - ) + deadline = s.creationTime.Add(s.config.handshakeTimeout()) + if t := s.idleTimeoutStartTime().Add(s.config.HandshakeIdleTimeout); t.Before(deadline) { + deadline = t + } } else { if keepAliveTime := s.nextKeepAliveTime(); !keepAliveTime.IsZero() { deadline = keepAliveTime @@ -739,10 +781,32 @@ func (s *connection) maybeResetTimer() { } func (s *connection) idleTimeoutStartTime() time.Time { - return utils.MaxTime(s.lastPacketReceivedTime, s.firstAckElicitingPacketAfterIdleSentTime) + startTime := s.lastPacketReceivedTime + if t := s.firstAckElicitingPacketAfterIdleSentTime; t.After(startTime) { + startTime = t + } + return startTime } -func (s *connection) handleHandshakeComplete() error { +func (s *connection) switchToNewPath(tr *Transport, now time.Time) { + initialPacketSize := protocol.ByteCount(s.config.InitialPacketSize) + s.sentPacketHandler.MigratedPath(now, initialPacketSize) + maxPacketSize := protocol.ByteCount(protocol.MaxPacketBufferSize) + if s.peerParams.MaxUDPPayloadSize > 0 && s.peerParams.MaxUDPPayloadSize < maxPacketSize { + maxPacketSize = s.peerParams.MaxUDPPayloadSize + } + s.mtuDiscoverer.Reset(now, initialPacketSize, maxPacketSize) + s.conn = newSendConn(tr.conn, s.conn.RemoteAddr(), packetInfo{}, utils.DefaultLogger) // TODO: find a better way + s.sendQueue.Close() + s.sendQueue = newSendQueue(s.conn) + go func() { + if err := s.sendQueue.Run(); err != nil { + s.destroyImpl(err) + } + }() +} + +func (s *connection) handleHandshakeComplete(now time.Time) error { defer close(s.handshakeCompleteChan) // Once the handshake completes, we have derived 1-RTT keys. // There's no point in queueing undecryptable packets for later decryption anymore. @@ -763,7 +827,7 @@ func (s *connection) handleHandshakeComplete() error { } // All these only apply to the server side. - if err := s.handleHandshakeConfirmed(); err != nil { + if err := s.handleHandshakeConfirmed(now); err != nil { return err } @@ -786,42 +850,82 @@ func (s *connection) handleHandshakeComplete() error { return nil } -func (s *connection) handleHandshakeConfirmed() error { - if err := s.dropEncryptionLevel(protocol.EncryptionHandshake); err != nil { +func (s *connection) handleHandshakeConfirmed(now time.Time) error { + if err := s.dropEncryptionLevel(protocol.EncryptionHandshake, now); err != nil { return err } s.handshakeConfirmed = true - s.sentPacketHandler.SetHandshakeConfirmed() s.cryptoStreamHandler.SetHandshakeConfirmed() if !s.config.DisablePathMTUDiscovery && s.conn.capabilities().DF { - s.mtuDiscoverer.Start() + s.mtuDiscoverer.Start(now) } return nil } -func (s *connection) handlePacketImpl(rp receivedPacket) bool { - s.sentPacketHandler.ReceivedBytes(rp.Size()) +func (s *connection) handlePackets() (wasProcessed bool, _ error) { + // Now process all packets in the receivedPackets channel. 
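// --- Editor's note: illustrative sketch, not part of the diff. ---
// The timer deadline is now computed with plain comparisons instead of the
// removed utils.MinTime / utils.MaxTime helpers. earliestDeadline is a
// hypothetical helper showing the same "ignore zero values, keep the smallest"
// rule used by maybeResetTimer and idleTimeoutStartTime.
package main

import (
	"fmt"
	"time"
)

func earliestDeadline(candidates ...time.Time) time.Time {
	var deadline time.Time
	for _, t := range candidates {
		if t.IsZero() {
			continue // a zero time means "no deadline from this source"
		}
		if deadline.IsZero() || t.Before(deadline) {
			deadline = t
		}
	}
	return deadline
}

func main() {
	now := time.Now()
	d := earliestDeadline(time.Time{}, now.Add(3*time.Second), now.Add(time.Second))
	fmt.Println(d.Equal(now.Add(time.Second))) // true: the closest non-zero deadline wins
}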
+ // Limit the number of packets to the length of the receivedPackets channel, + // so we eventually get a chance to send out an ACK when receiving a lot of packets. + s.receivedPacketMx.Lock() + numPackets := s.receivedPackets.Len() + if numPackets == 0 { + s.receivedPacketMx.Unlock() + return false, nil + } + + var hasMorePackets bool + for i := 0; i < numPackets; i++ { + if i > 0 { + s.receivedPacketMx.Lock() + } + p := s.receivedPackets.PopFront() + hasMorePackets = !s.receivedPackets.Empty() + s.receivedPacketMx.Unlock() + + processed, err := s.handleOnePacket(p) + if err != nil { + return false, err + } + if processed { + wasProcessed = true + } + if !hasMorePackets { + break + } + // only process a single packet at a time before handshake completion + if !s.handshakeComplete { + break + } + } + if hasMorePackets { + select { + case s.notifyReceivedPacket <- struct{}{}: + default: + } + } + return wasProcessed, nil +} + +func (s *connection) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) { + s.sentPacketHandler.ReceivedBytes(rp.Size(), rp.rcvTime) if wire.IsVersionNegotiationPacket(rp.data) { s.handleVersionNegotiationPacket(rp) - return false + return false, nil } var counter uint8 var lastConnID protocol.ConnectionID - var processed bool data := rp.data p := rp for len(data) > 0 { - var destConnID protocol.ConnectionID if counter > 0 { p = *(p.Clone()) p.data = data - var err error - destConnID, err = wire.ParseConnectionID(p.data, s.srcConnIDLen) + destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen) if err != nil { if s.tracer != nil && s.tracer.DroppedPacket != nil { s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropHeaderParseError) @@ -873,24 +977,34 @@ func (s *connection) handlePacketImpl(rp receivedPacket) bool { p.data = packetData - if wasProcessed := s.handleLongHeaderPacket(p, hdr); wasProcessed { - processed = true + processed, err := s.handleLongHeaderPacket(p, hdr) + if err != nil { + return false, err + } + if processed { + wasProcessed = true } data = rest } else { if counter > 0 { p.buffer.Split() } - processed = s.handleShortHeaderPacket(p, destConnID) + processed, err := s.handleShortHeaderPacket(p, counter > 0) + if err != nil { + return false, err + } + if processed { + wasProcessed = true + } break } } p.buffer.MaybeRelease() - return processed + return wasProcessed, nil } -func (s *connection) handleShortHeaderPacket(p receivedPacket, destConnID protocol.ConnectionID) bool { +func (s *connection) handleShortHeaderPacket(p receivedPacket, isCoalesced bool) (wasProcessed bool, _ error) { var wasQueued bool defer func() { @@ -900,11 +1014,28 @@ func (s *connection) handleShortHeaderPacket(p receivedPacket, destConnID protoc } }() + destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen) + if err != nil { + s.tracer.DroppedPacket(logging.PacketType1RTT, protocol.InvalidPacketNumber, protocol.ByteCount(len(p.data)), logging.PacketDropHeaderParseError) + return false, nil + } pn, pnLen, keyPhase, data, err := s.unpacker.UnpackShortHeader(p.rcvTime, p.data) if err != nil { - wasQueued = s.handleUnpackError(err, p, logging.PacketType1RTT) - return false + // Stateless reset packets (see RFC 9000, section 10.3): + // * fill the entire UDP datagram (i.e. 
they cannot be part of a coalesced packet) + // * are short header packets (first bit is 0) + // * have the QUIC bit set (second bit is 1) + // * are at least 21 bytes long + if !isCoalesced && len(p.data) >= protocol.MinReceivedStatelessResetSize && p.data[0]&0b11000000 == 0b01000000 { + token := protocol.StatelessResetToken(p.data[len(p.data)-16:]) + if s.connIDManager.IsActiveStatelessResetToken(token) { + return false, &StatelessResetError{} + } + } + wasQueued, err = s.handleUnpackError(err, p, logging.PacketType1RTT) + return false, err } + s.largestRcvdAppData = max(s.largestRcvdAppData, pn) if s.logger.Debug() { s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", pn, p.Size(), destConnID) @@ -916,7 +1047,7 @@ func (s *connection) handleShortHeaderPacket(p receivedPacket, destConnID protoc if s.tracer != nil && s.tracer.DroppedPacket != nil { s.tracer.DroppedPacket(logging.PacketType1RTT, pn, p.Size(), logging.PacketDropDuplicate) } - return false + return false, nil } var log func([]logging.Frame) @@ -935,14 +1066,59 @@ func (s *connection) handleShortHeaderPacket(p receivedPacket, destConnID protoc ) } } - if err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log); err != nil { - s.closeLocal(err) - return false + isNonProbing, pathChallenge, err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log) + if err != nil { + return false, err } - return true + + // In RFC 9000, only the client can migrate between paths. + if s.perspective == protocol.PerspectiveClient { + return true, nil + } + if addrsEqual(p.remoteAddr, s.RemoteAddr()) { + return true, nil + } + + var shouldSwitchPath bool + if s.pathManager == nil { + s.pathManager = newPathManager( + s.connIDManager.GetConnIDForPath, + s.connIDManager.RetireConnIDForPath, + s.logger, + ) + } + destConnID, frames, shouldSwitchPath := s.pathManager.HandlePacket(p.remoteAddr, p.rcvTime, pathChallenge, isNonProbing) + if len(frames) > 0 { + probe, buf, err := s.packer.PackPathProbePacket(destConnID, frames, s.version) + if err != nil { + return true, err + } + s.logger.Debugf("sending path probe packet to %s", p.remoteAddr) + s.logShortHeaderPacket(probe.DestConnID, probe.Ack, probe.Frames, probe.StreamFrames, probe.PacketNumber, probe.PacketNumberLen, probe.KeyPhase, protocol.ECNNon, buf.Len(), false) + s.registerPackedShortHeaderPacket(probe, protocol.ECNNon, p.rcvTime) + s.sendQueue.SendProbe(buf, p.remoteAddr) + } + // We only switch paths in response to the highest-numbered non-probing packet, + // see section 9.3 of RFC 9000. 
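// --- Editor's note: illustrative sketch, not part of the diff. ---
// When 1-RTT unpacking fails, the datagram might be a stateless reset
// (RFC 9000, section 10.3). The check mirrors the diff: not coalesced, long
// enough, a short-header packet with the QUIC bit set, and the trailing
// 16 bytes matching an active reset token. The token lookup here is a
// hypothetical stand-in for the connection ID manager.
package main

import (
	"bytes"
	"fmt"
)

const minStatelessResetSize = 21 // 1 byte header + >=4 bytes random + 16 byte token

func isStatelessReset(datagram []byte, isCoalesced bool, activeToken [16]byte) bool {
	if isCoalesced || len(datagram) < minStatelessResetSize {
		return false
	}
	// first bit 0 (short header), second bit 1 (fixed/QUIC bit)
	if datagram[0]&0b1100_0000 != 0b0100_0000 {
		return false
	}
	return bytes.Equal(datagram[len(datagram)-16:], activeToken[:])
}

func main() {
	var token [16]byte
	copy(token[:], "0123456789abcdef")
	pkt := append([]byte{0b0100_0001, 1, 2, 3, 4}, token[:]...)
	fmt.Println(isStatelessReset(pkt, false, token)) // true
}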
+ if !shouldSwitchPath || pn != s.largestRcvdAppData { + return true, nil + } + s.pathManager.SwitchToPath(p.remoteAddr) + s.sentPacketHandler.MigratedPath(p.rcvTime, protocol.ByteCount(s.config.InitialPacketSize)) + maxPacketSize := protocol.ByteCount(protocol.MaxPacketBufferSize) + if s.peerParams.MaxUDPPayloadSize > 0 && s.peerParams.MaxUDPPayloadSize < maxPacketSize { + maxPacketSize = s.peerParams.MaxUDPPayloadSize + } + s.mtuDiscoverer.Reset( + p.rcvTime, + protocol.ByteCount(s.config.InitialPacketSize), + maxPacketSize, + ) + s.conn.ChangeRemoteAddr(p.remoteAddr, p.info) + return true, nil } -func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ { +func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) (wasProcessed bool, _ error) { var wasQueued bool defer func() { @@ -953,7 +1129,7 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) }() if hdr.Type == protocol.PacketTypeRetry { - return s.handleRetryPacket(hdr, p.data, p.rcvTime) + return s.handleRetryPacket(hdr, p.data, p.rcvTime), nil } // The server can change the source connection ID with the first Handshake packet. @@ -963,20 +1139,20 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) s.tracer.DroppedPacket(logging.PacketTypeInitial, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnknownConnectionID) } s.logger.Debugf("Dropping Initial packet (%d bytes) with unexpected source connection ID: %s (expected %s)", p.Size(), hdr.SrcConnectionID, s.handshakeDestConnID) - return false + return false, nil } // drop 0-RTT packets, if we are a client if s.perspective == protocol.PerspectiveClient && hdr.Type == protocol.PacketType0RTT { if s.tracer != nil && s.tracer.DroppedPacket != nil { - s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable) + s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket) } - return false + return false, nil } - packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data, s.version) + packet, err := s.unpacker.UnpackLongHeader(hdr, p.data) if err != nil { - wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr)) - return false + wasQueued, err = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr)) + return false, err } if s.logger.Debug() { @@ -989,39 +1165,40 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) if s.tracer != nil && s.tracer.DroppedPacket != nil { s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), pn, p.Size(), logging.PacketDropDuplicate) } - return false + return false, nil } if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil { - s.closeLocal(err) - return false + return false, err } - return true + return true, nil } -func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.PacketType) (wasQueued bool) { +func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.PacketType) (wasQueued bool, _ error) { switch err { case handshake.ErrKeysDropped: if s.tracer != nil && s.tracer.DroppedPacket != nil { s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable) } s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", pt, p.Size()) + return false, nil case 
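// --- Editor's note: illustrative sketch, not part of the diff. ---
// Per RFC 9000 section 9.3, the server only switches to a new remote address
// in response to the highest-numbered non-probing packet it has received on
// that path. Hypothetical names; only the migration decision is shown.
package main

import (
	"fmt"
	"net/netip"
)

type migrationState struct {
	largestNonProbing int64
	currentPath       netip.AddrPort
}

// onPacket reports whether the connection should switch its remote address to addr.
func (m *migrationState) onPacket(pn int64, addr netip.AddrPort, nonProbing, pathValidated bool) bool {
	if nonProbing && pn > m.largestNonProbing {
		m.largestNonProbing = pn
	}
	if addr == m.currentPath || !pathValidated || !nonProbing {
		return false
	}
	// only migrate for the packet that currently holds the highest number
	return pn == m.largestNonProbing
}

func main() {
	home := netip.MustParseAddrPort("192.0.2.1:4433")
	away := netip.MustParseAddrPort("198.51.100.7:4433")
	m := &migrationState{currentPath: home}
	fmt.Println(m.onPacket(10, home, true, true)) // false: same path
	fmt.Println(m.onPacket(9, away, true, true))  // false: not the highest-numbered packet
	fmt.Println(m.onPacket(11, away, true, true)) // true: migrate
}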
handshake.ErrKeysNotYetAvailable: // Sealer for this encryption level not yet available. // Try again later. s.tryQueueingUndecryptablePacket(p, pt) - return true + return true, nil case wire.ErrInvalidReservedBits: - s.closeLocal(&qerr.TransportError{ + return false, &qerr.TransportError{ ErrorCode: qerr.ProtocolViolation, ErrorMessage: err.Error(), - }) + } case handshake.ErrDecryptionFailed: // This might be a packet injected by an attacker. Drop it. if s.tracer != nil && s.tracer.DroppedPacket != nil { s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropPayloadDecryptError) } s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", pt, p.Size(), err) + return false, nil default: var headerErr *headerParseError if errors.As(err, &headerErr) { @@ -1030,13 +1207,12 @@ func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.P s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropHeaderParseError) } s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", pt, p.Size(), err) - } else { - // This is an error returned by the AEAD (other than ErrDecryptionFailed). - // For example, a PROTOCOL_VIOLATION due to key updates. - s.closeLocal(err) + return false, nil } + // This is an error returned by the AEAD (other than ErrDecryptionFailed). + // For example, a PROTOCOL_VIOLATION due to key updates. + return false, err } - return false } func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime time.Time) bool /* was this a valid Retry */ { @@ -1078,6 +1254,15 @@ func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime ti return false } + newDestConnID := hdr.SrcConnectionID + s.receivedRetry = true + s.sentPacketHandler.ResetForRetry(rcvTime) + s.handshakeDestConnID = newDestConnID + s.retrySrcConnID = &newDestConnID + s.cryptoStreamHandler.ChangeConnectionID(newDestConnID) + s.packer.SetToken(hdr.Token) + s.connIDManager.ChangeInitialConnID(newDestConnID) + if s.logger.Debug() { s.logger.Debugf("<- Received Retry:") (&wire.ExtendedHeader{Header: *hdr}).Log(s.logger) @@ -1086,17 +1271,7 @@ func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime ti if s.tracer != nil && s.tracer.ReceivedRetry != nil { s.tracer.ReceivedRetry(hdr) } - newDestConnID := hdr.SrcConnectionID - s.receivedRetry = true - if err := s.sentPacketHandler.ResetForRetry(rcvTime); err != nil { - s.closeLocal(err) - return false - } - s.handshakeDestConnID = newDestConnID - s.retrySrcConnID = &newDestConnID - s.cryptoStreamHandler.ChangeConnectionID(newDestConnID) - s.packer.SetToken(hdr.Token) - s.connIDManager.ChangeInitialConnID(newDestConnID) + s.scheduleSending() return true } @@ -1205,7 +1380,7 @@ func (s *connection) handleUnpackedLongHeaderPacket( !s.droppedInitialKeys { // On the server side, Initial keys are dropped as soon as the first Handshake packet is received. // See Section 4.9.1 of RFC 9001. 
- if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil { + if err := s.dropEncryptionLevel(protocol.EncryptionInitial, rcvTime); err != nil { return err } } @@ -1214,13 +1389,17 @@ func (s *connection) handleUnpackedLongHeaderPacket( s.firstAckElicitingPacketAfterIdleSentTime = time.Time{} s.keepAlivePingSent = false + if packet.hdr.Type == protocol.PacketType0RTT { + s.largestRcvdAppData = max(s.largestRcvdAppData, packet.hdr.PacketNumber) + } + var log func([]logging.Frame) if s.tracer != nil && s.tracer.ReceivedLongHeaderPacket != nil { log = func(frames []logging.Frame) { s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, ecn, frames) } } - isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log) + isAckEliciting, _, _, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log, rcvTime) if err != nil { return err } @@ -1234,24 +1413,30 @@ func (s *connection) handleUnpackedShortHeaderPacket( ecn protocol.ECN, rcvTime time.Time, log func([]logging.Frame), -) error { +) (isNonProbing bool, pathChallenge *wire.PathChallengeFrame, _ error) { s.lastPacketReceivedTime = rcvTime s.firstAckElicitingPacketAfterIdleSentTime = time.Time{} s.keepAlivePingSent = false - isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log) + isAckEliciting, isNonProbing, pathChallenge, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log, rcvTime) if err != nil { - return err + return false, nil, err } - return s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting) + if err := s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting); err != nil { + return false, nil, err + } + return isNonProbing, pathChallenge, nil } +// handleFrames parses the frames, one after the other, and handles them. +// It returns the last PATH_CHALLENGE frame contained in the packet, if any. func (s *connection) handleFrames( data []byte, destConnID protocol.ConnectionID, encLevel protocol.EncryptionLevel, log func([]logging.Frame), -) (isAckEliciting bool, _ error) { + rcvTime time.Time, +) (isAckEliciting, isNonProbing bool, pathChallenge *wire.PathChallengeFrame, _ error) { // Only used for tracing. // If we're not tracing, this slice will always remain empty. var frames []logging.Frame @@ -1263,7 +1448,7 @@ func (s *connection) handleFrames( for len(data) > 0 { l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version) if err != nil { - return false, err + return false, false, nil, err } data = data[l:] if frame == nil { @@ -1272,27 +1457,34 @@ func (s *connection) handleFrames( if ackhandler.IsFrameAckEliciting(frame) { isAckEliciting = true } + if !wire.IsProbingFrame(frame) { + isNonProbing = true + } if log != nil { - frames = append(frames, logutils.ConvertFrame(frame)) + frames = append(frames, toLoggingFrame(frame)) } // An error occurred handling a previous frame. // Don't handle the current frame. if handleErr != nil { continue } - if err := s.handleFrame(frame, encLevel, destConnID); err != nil { + pc, err := s.handleFrame(frame, encLevel, destConnID, rcvTime) + if err != nil { if log == nil { - return false, err + return false, false, nil, err } // If we're logging, we need to keep parsing (but not handling) all frames. 
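// --- Editor's note: illustrative sketch, not part of the diff. ---
// handleFrames now tracks whether a packet contained any non-probing frame and
// remembers the last PATH_CHALLENGE it saw, so the caller can drive path
// validation. Per RFC 9000 section 9.2, only PATH_CHALLENGE, PATH_RESPONSE,
// NEW_CONNECTION_ID and PADDING count as probing frames. The frame types below
// are hypothetical stand-ins for the wire package.
package main

import "fmt"

type frame interface{ isProbing() bool }

type pathChallenge struct{ data [8]byte }
type streamFrame struct{}
type padding struct{}

func (pathChallenge) isProbing() bool { return true }
func (streamFrame) isProbing() bool   { return false }
func (padding) isProbing() bool       { return true }

func classify(frames []frame) (isNonProbing bool, lastChallenge *pathChallenge) {
	for _, f := range frames {
		if !f.isProbing() {
			isNonProbing = true
		}
		if pc, ok := f.(pathChallenge); ok {
			lastChallenge = &pc
		}
	}
	return
}

func main() {
	nonProbing, pc := classify([]frame{padding{}, pathChallenge{}, streamFrame{}})
	fmt.Println(nonProbing, pc != nil) // true true
}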
handleErr = err } + if pc != nil { + pathChallenge = pc + } } if log != nil { log(frames) if handleErr != nil { - return false, handleErr + return false, false, nil, handleErr } } @@ -1301,28 +1493,32 @@ func (s *connection) handleFrames( // We receive a Handshake packet that contains the CRYPTO frame that allows us to complete the handshake, // and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame. if !handshakeWasComplete && s.handshakeComplete { - if err := s.handleHandshakeComplete(); err != nil { - return false, err + if err := s.handleHandshakeComplete(rcvTime); err != nil { + return false, false, nil, err } } - return } -func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel, destConnID protocol.ConnectionID) error { +func (s *connection) handleFrame( + f wire.Frame, + encLevel protocol.EncryptionLevel, + destConnID protocol.ConnectionID, + rcvTime time.Time, +) (pathChallenge *wire.PathChallengeFrame, _ error) { var err error wire.LogFrame(s.logger, f, false) switch frame := f.(type) { case *wire.CryptoFrame: - err = s.handleCryptoFrame(frame, encLevel) + err = s.handleCryptoFrame(frame, encLevel, rcvTime) case *wire.StreamFrame: - err = s.handleStreamFrame(frame) + err = s.handleStreamFrame(frame, rcvTime) case *wire.AckFrame: - err = s.handleAckFrame(frame, encLevel) + err = s.handleAckFrame(frame, encLevel, rcvTime) case *wire.ConnectionCloseFrame: - s.handleConnectionCloseFrame(frame) + err = s.handleConnectionCloseFrame(frame) case *wire.ResetStreamFrame: - err = s.handleResetStreamFrame(frame) + err = s.handleResetStreamFrame(frame, rcvTime) case *wire.MaxDataFrame: s.handleMaxDataFrame(frame) case *wire.MaxStreamDataFrame: @@ -1331,15 +1527,16 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel s.handleMaxStreamsFrame(frame) case *wire.DataBlockedFrame: case *wire.StreamDataBlockedFrame: + err = s.handleStreamDataBlockedFrame(frame) case *wire.StreamsBlockedFrame: case *wire.StopSendingFrame: err = s.handleStopSendingFrame(frame) case *wire.PingFrame: case *wire.PathChallengeFrame: s.handlePathChallengeFrame(frame) + pathChallenge = frame case *wire.PathResponseFrame: - // since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs - err = errors.New("unexpected PATH_RESPONSE frame") + err = s.handlePathResponseFrame(frame) case *wire.NewTokenFrame: err = s.handleNewTokenFrame(frame) case *wire.NewConnectionIDFrame: @@ -1347,53 +1544,69 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel case *wire.RetireConnectionIDFrame: err = s.handleRetireConnectionIDFrame(frame, destConnID) case *wire.HandshakeDoneFrame: - err = s.handleHandshakeDoneFrame() + err = s.handleHandshakeDoneFrame(rcvTime) case *wire.DatagramFrame: err = s.handleDatagramFrame(frame) default: err = fmt.Errorf("unexpected frame type: %s", reflect.ValueOf(&frame).Elem().Type().Name()) } - return err + return pathChallenge, err } // handlePacket is called by the server with a new packet func (s *connection) handlePacket(p receivedPacket) { + s.receivedPacketMx.Lock() // Discard packets once the amount of queued packets is larger than // the channel size, protocol.MaxConnUnprocessedPackets - select { - case s.receivedPackets <- p: - default: + if s.receivedPackets.Len() >= protocol.MaxConnUnprocessedPackets { if s.tracer != nil && s.tracer.DroppedPacket != nil { s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, p.Size(), 
logging.PacketDropDOSPrevention) } + s.receivedPacketMx.Unlock() + return + } + s.receivedPackets.PushBack(p) + s.receivedPacketMx.Unlock() + + select { + case s.notifyReceivedPacket <- struct{}{}: + default: } } -func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame) { +func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame) error { if frame.IsApplicationError { - s.closeRemote(&qerr.ApplicationError{ + return &qerr.ApplicationError{ Remote: true, ErrorCode: qerr.ApplicationErrorCode(frame.ErrorCode), ErrorMessage: frame.ReasonPhrase, - }) - return + } } - s.closeRemote(&qerr.TransportError{ + return &qerr.TransportError{ Remote: true, ErrorCode: qerr.TransportErrorCode(frame.ErrorCode), FrameType: frame.FrameType, ErrorMessage: frame.ReasonPhrase, - }) + } } -func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error { +func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) error { if err := s.cryptoStreamManager.HandleCryptoFrame(frame, encLevel); err != nil { return err } - return s.handleHandshakeEvents() + for { + data := s.cryptoStreamManager.GetCryptoData(encLevel) + if data == nil { + break + } + if err := s.cryptoStreamHandler.HandleMessage(data, encLevel); err != nil { + return err + } + } + return s.handleHandshakeEvents(rcvTime) } -func (s *connection) handleHandshakeEvents() error { +func (s *connection) handleHandshakeEvents(now time.Time) error { for { ev := s.cryptoStreamHandler.NextEvent() var err error @@ -1410,11 +1623,11 @@ func (s *connection) handleHandshakeEvents() error { s.restoreTransportParameters(ev.TransportParameters) close(s.earlyConnReadyChan) case handshake.EventReceivedReadKeys: - // Queue all packets for decryption that have been undecryptable so far. - s.undecryptablePacketsToProcess = s.undecryptablePackets + // queue all previously undecryptable packets + s.undecryptablePacketsToProcess = append(s.undecryptablePacketsToProcess, s.undecryptablePackets...) s.undecryptablePackets = nil case handshake.EventDiscard0RTTKeys: - err = s.dropEncryptionLevel(protocol.Encryption0RTT) + err = s.dropEncryptionLevel(protocol.Encryption0RTT, now) case handshake.EventWriteInitialData: _, err = s.initialStream.Write(ev.Data) case handshake.EventWriteHandshakeData: @@ -1426,17 +1639,15 @@ func (s *connection) handleHandshakeEvents() error { } } -func (s *connection) handleStreamFrame(frame *wire.StreamFrame) error { +func (s *connection) handleStreamFrame(frame *wire.StreamFrame, rcvTime time.Time) error { str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) if err != nil { return err } - if str == nil { - // Stream is closed and already garbage collected - // ignore this StreamFrame + if str == nil { // stream was already closed and garbage collected return nil } - return str.handleStreamFrame(frame) + return str.handleStreamFrame(frame, rcvTime) } func (s *connection) handleMaxDataFrame(frame *wire.MaxDataFrame) { @@ -1456,11 +1667,18 @@ func (s *connection) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) er return nil } +func (s *connection) handleStreamDataBlockedFrame(frame *wire.StreamDataBlockedFrame) error { + // We don't need to do anything in response to a STREAM_DATA_BLOCKED frame, + // but we need to make sure that the stream ID is valid. 
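// --- Editor's note: illustrative sketch, not part of the diff. ---
// handleCryptoFrame now drains the reassembled crypto stream itself and feeds
// each complete handshake message to the TLS layer (the new HandleMessage
// method), instead of delegating that loop to the crypto stream manager.
// The types below are hypothetical, simplified stand-ins.
package main

import "fmt"

type cryptoBuffer struct{ pending [][]byte }

// getCryptoData returns the next complete handshake message, or nil.
func (b *cryptoBuffer) getCryptoData() []byte {
	if len(b.pending) == 0 {
		return nil
	}
	msg := b.pending[0]
	b.pending = b.pending[1:]
	return msg
}

type tlsHandler struct{ handled int }

func (h *tlsHandler) handleMessage(data []byte) error {
	h.handled++
	return nil
}

func drainCrypto(buf *cryptoBuffer, tls *tlsHandler) error {
	for {
		data := buf.getCryptoData()
		if data == nil {
			return nil
		}
		if err := tls.handleMessage(data); err != nil {
			return err
		}
	}
}

func main() {
	buf := &cryptoBuffer{pending: [][]byte{{0x01}, {0x02}}}
	tls := &tlsHandler{}
	_ = drainCrypto(buf, tls)
	fmt.Println(tls.handled) // 2
}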
+ _, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) + return err +} + func (s *connection) handleMaxStreamsFrame(frame *wire.MaxStreamsFrame) { s.streamsMap.HandleMaxStreamsFrame(frame) } -func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error { +func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame, rcvTime time.Time) error { str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) if err != nil { return err @@ -1469,7 +1687,7 @@ func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error // stream is closed and already garbage collected return nil } - return str.handleResetStreamFrame(frame) + return str.handleResetStreamFrame(frame, rcvTime) } func (s *connection) handleStopSendingFrame(frame *wire.StopSendingFrame) error { @@ -1485,8 +1703,45 @@ func (s *connection) handleStopSendingFrame(frame *wire.StopSendingFrame) error return nil } -func (s *connection) handlePathChallengeFrame(frame *wire.PathChallengeFrame) { - s.queueControlFrame(&wire.PathResponseFrame{Data: frame.Data}) +func (s *connection) handlePathChallengeFrame(f *wire.PathChallengeFrame) { + if s.perspective == protocol.PerspectiveClient { + s.queueControlFrame(&wire.PathResponseFrame{Data: f.Data}) + } +} + +func (s *connection) handlePathResponseFrame(f *wire.PathResponseFrame) error { + switch s.perspective { + case protocol.PerspectiveClient: + return s.handlePathResponseFrameClient(f) + case protocol.PerspectiveServer: + return s.handlePathResponseFrameServer(f) + default: + panic("unreachable") + } +} + +func (s *connection) handlePathResponseFrameClient(f *wire.PathResponseFrame) error { + pm := s.pathManagerOutgoing.Load() + if pm == nil { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "unexpected PATH_RESPONSE frame", + } + } + pm.HandlePathResponseFrame(f) + return nil +} + +func (s *connection) handlePathResponseFrameServer(f *wire.PathResponseFrame) error { + if s.pathManager == nil { + // since we didn't send PATH_CHALLENGEs yet, we don't expect PATH_RESPONSEs + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "unexpected PATH_RESPONSE frame", + } + } + s.pathManager.HandlePathResponseFrame(f) + return nil } func (s *connection) handleNewTokenFrame(frame *wire.NewTokenFrame) error { @@ -1510,7 +1765,7 @@ func (s *connection) handleRetireConnectionIDFrame(f *wire.RetireConnectionIDFra return s.connIDGenerator.Retire(f.SequenceNumber, destConnID) } -func (s *connection) handleHandshakeDoneFrame() error { +func (s *connection) handleHandshakeDoneFrame(rcvTime time.Time) error { if s.perspective == protocol.PerspectiveServer { return &qerr.TransportError{ ErrorCode: qerr.ProtocolViolation, @@ -1518,12 +1773,12 @@ func (s *connection) handleHandshakeDoneFrame() error { } } if !s.handshakeConfirmed { - return s.handleHandshakeConfirmed() + return s.handleHandshakeConfirmed(rcvTime) } return nil } -func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel) error { +func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) error { acked1RTTPacket, err := s.sentPacketHandler.ReceivedAck(frame, encLevel, s.lastPacketReceivedTime) if err != nil { return err @@ -1535,10 +1790,17 @@ func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.Encr // This is only possible if the ACK was sent in a 1-RTT packet. 
// This is an optimization over simply waiting for a HANDSHAKE_DONE frame, see section 4.1.2 of RFC 9001. if s.perspective == protocol.PerspectiveClient && !s.handshakeConfirmed { - if err := s.handleHandshakeConfirmed(); err != nil { + if err := s.handleHandshakeConfirmed(rcvTime); err != nil { return err } } + // If one of the acknowledged packets was a Path MTU probe packet, this might have increased the Path MTU estimate. + if s.mtuDiscoverer != nil { + if mtu := s.mtuDiscoverer.CurrentSize(); mtu > protocol.ByteCount(s.currentMTUEstimate.Load()) { + s.currentMTUEstimate.Store(uint32(mtu)) + s.sentPacketHandler.SetMaxDatagramSize(mtu) + } + } return s.cryptoStreamHandler.SetLargest1RTTAcked(frame.LargestAcked()) } @@ -1553,16 +1815,17 @@ func (s *connection) handleDatagramFrame(f *wire.DatagramFrame) error { return nil } +func (s *connection) setCloseError(e *closeError) { + s.closeErr.CompareAndSwap(nil, e) + select { + case s.closeChan <- struct{}{}: + default: + } +} + // closeLocal closes the connection and send a CONNECTION_CLOSE containing the error func (s *connection) closeLocal(e error) { - s.closeOnce.Do(func() { - if e == nil { - s.logger.Infof("Closing connection.") - } else { - s.logger.Errorf("Closing connection with error: %s", e) - } - s.closeChan <- closeError{err: e, immediate: false, remote: false} - }) + s.setCloseError(&closeError{err: e, immediate: false}) } // destroy closes the connection without sending the error on the wire @@ -1572,21 +1835,7 @@ func (s *connection) destroy(e error) { } func (s *connection) destroyImpl(e error) { - s.closeOnce.Do(func() { - if nerr, ok := e.(net.Error); ok && nerr.Timeout() { - s.logger.Errorf("Destroying connection: %s", e) - } else { - s.logger.Errorf("Destroying connection with error: %s", e) - } - s.closeChan <- closeError{err: e, immediate: true, remote: false} - }) -} - -func (s *connection) closeRemote(e error) { - s.closeOnce.Do(func() { - s.logger.Errorf("Peer closed connection with error: %s", e) - s.closeChan <- closeError{err: e, immediate: true, remote: true} - }) + s.setCloseError(&closeError{err: e, immediate: true}) } func (s *connection) CloseWithError(code ApplicationErrorCode, desc string) error { @@ -1604,13 +1853,25 @@ func (s *connection) closeWithTransportError(code TransportErrorCode) { } func (s *connection) handleCloseError(closeErr *closeError) { + if closeErr.immediate { + if nerr, ok := closeErr.err.(net.Error); ok && nerr.Timeout() { + s.logger.Errorf("Destroying connection: %s", closeErr.err) + } else { + s.logger.Errorf("Destroying connection with error: %s", closeErr.err) + } + } else { + if closeErr.err == nil { + s.logger.Infof("Closing connection.") + } else { + s.logger.Errorf("Closing connection with error: %s", closeErr.err) + } + } + e := closeErr.err if e == nil { e = &qerr.ApplicationError{} } else { - defer func() { - closeErr.err = e - }() + defer func() { closeErr.err = e }() } var ( @@ -1620,14 +1881,19 @@ func (s *connection) handleCloseError(closeErr *closeError) { applicationErr *ApplicationError transportErr *TransportError ) + var isRemoteClose bool switch { case errors.Is(e, qerr.ErrIdleTimeout), errors.Is(e, qerr.ErrHandshakeTimeout), errors.As(e, &statelessResetErr), errors.As(e, &versionNegotiationErr), - errors.As(e, &recreateErr), - errors.As(e, &applicationErr), - errors.As(e, &transportErr): + errors.As(e, &recreateErr): + case errors.As(e, &applicationErr): + isRemoteClose = applicationErr.Remote + case errors.As(e, &transportErr): + isRemoteClose = 
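// --- Editor's note: illustrative sketch, not part of the diff. ---
// sync.Once plus a buffered closeError channel is replaced by an
// atomic.Pointer that records only the first close error, plus a non-blocking
// signal so the run loop wakes up. Hypothetical names mirroring the diff's
// setCloseError.
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

type closer struct {
	closeErr  atomic.Pointer[error]
	closeChan chan struct{}
}

func newCloser() *closer { return &closer{closeChan: make(chan struct{}, 1)} }

func (c *closer) setCloseError(err error) {
	e := err
	c.closeErr.CompareAndSwap(nil, &e) // only the first error wins
	select {
	case c.closeChan <- struct{}{}: // wake the run loop; at most one pending signal
	default:
	}
}

func main() {
	c := newCloser()
	c.setCloseError(errors.New("first"))
	c.setCloseError(errors.New("second")) // ignored
	<-c.closeChan
	fmt.Println(*c.closeErr.Load()) // first
}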
transportErr.Remote + case closeErr.immediate: + e = closeErr.err default: e = &qerr.TransportError{ ErrorCode: qerr.InternalError, @@ -1636,17 +1902,22 @@ func (s *connection) handleCloseError(closeErr *closeError) { } s.streamsMap.CloseWithError(e) - s.connIDManager.Close() if s.datagramQueue != nil { s.datagramQueue.CloseWithError(e) } + // In rare instances, the connection ID manager might switch to a new connection ID + // when sending the CONNECTION_CLOSE frame. + // The connection ID manager removes the active stateless reset token from the packet + // handler map when it is closed, so we need to make sure that this happens last. + defer s.connIDManager.Close() + if s.tracer != nil && s.tracer.ClosedConnection != nil && !errors.As(e, &recreateErr) { s.tracer.ClosedConnection(e) } // If this is a remote close we're done here - if closeErr.remote { + if isRemoteClose { s.connIDGenerator.ReplaceWithClosed(nil) return } @@ -1667,11 +1938,11 @@ func (s *connection) handleCloseError(closeErr *closeError) { s.connIDGenerator.ReplaceWithClosed(connClosePacket) } -func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) error { +func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel, now time.Time) error { if s.tracer != nil && s.tracer.DroppedEncryptionLevel != nil { s.tracer.DroppedEncryptionLevel(encLevel) } - s.sentPacketHandler.DropPackets(encLevel) + s.sentPacketHandler.DropPackets(encLevel, now) s.receivedPacketHandler.DropPackets(encLevel) //nolint:exhaustive // only Initial and 0-RTT need special treatment switch encLevel { @@ -1680,10 +1951,8 @@ func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) erro s.cryptoStreamHandler.DiscardInitialKeys() case protocol.Encryption0RTT: s.streamsMap.ResetFor0RTT() - if err := s.connFlowController.Reset(); err != nil { - return err - } - return s.framer.Handle0RTTRejection() + s.framer.Handle0RTTRejection() + return s.connFlowController.Reset() } return s.cryptoStreamManager.Drop(encLevel) } @@ -1770,8 +2039,12 @@ func (s *connection) checkTransportParameters(params *wire.TransportParameters) func (s *connection) applyTransportParameters() { params := s.peerParams // Our local idle timeout will always be > 0. - s.idleTimeout = utils.MinNonZeroDuration(s.config.MaxIdleTimeout, params.MaxIdleTimeout) - s.keepAliveInterval = min(s.config.KeepAlivePeriod, min(s.idleTimeout/2, protocol.MaxKeepAliveInterval)) + s.idleTimeout = s.config.MaxIdleTimeout + // If the peer advertised an idle timeout, take the minimum of the values. + if params.MaxIdleTimeout > 0 { + s.idleTimeout = min(s.idleTimeout, params.MaxIdleTimeout) + } + s.keepAliveInterval = min(s.config.KeepAlivePeriod, s.idleTimeout/2) s.streamsMap.UpdateLimits(params) s.frameParser.SetAckDelayExponent(params.AckDelayExponent) s.connFlowController.UpdateSendWindow(params.InitialMaxData) @@ -1793,7 +2066,6 @@ func (s *connection) applyTransportParameters() { s.rttStats, protocol.ByteCount(s.config.InitialPacketSize), maxPacketSize, - s.onMTUIncreased, s.tracer, ) } @@ -1821,28 +2093,10 @@ func (s *connection) triggerSending(now time.Time) error { case ackhandler.SendAck: // We can at most send a single ACK only packet. // There will only be a new ACK after receiving new packets. - // SendAck is only returned when we're congestion limited, so we don't need to set the pacinggs timer. + // SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer. 
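// --- Editor's note: illustrative sketch, not part of the diff. ---
// The effective idle timeout is the local MaxIdleTimeout, lowered to the
// peer's advertised max_idle_timeout only if the peer advertised one (0 means
// "no limit" in the transport parameters); the keep-alive interval is then
// capped at half of that. Hypothetical helper showing the same rule.
package main

import (
	"fmt"
	"time"
)

func effectiveIdleTimeout(local, peer time.Duration) time.Duration {
	timeout := local
	if peer > 0 {
		timeout = min(timeout, peer)
	}
	return timeout
}

func main() {
	idle := effectiveIdleTimeout(30*time.Second, 10*time.Second)
	keepAlive := min(15*time.Second, idle/2)
	fmt.Println(idle, keepAlive) // 10s 5s
}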
return s.maybeSendAckOnlyPacket(now) - case ackhandler.SendPTOInitial: - if err := s.sendProbePacket(protocol.EncryptionInitial, now); err != nil { - return err - } - if s.sendQueue.WouldBlock() { - s.scheduleSending() - return nil - } - return s.triggerSending(now) - case ackhandler.SendPTOHandshake: - if err := s.sendProbePacket(protocol.EncryptionHandshake, now); err != nil { - return err - } - if s.sendQueue.WouldBlock() { - s.scheduleSending() - return nil - } - return s.triggerSending(now) - case ackhandler.SendPTOAppData: - if err := s.sendProbePacket(protocol.Encryption1RTT, now); err != nil { + case ackhandler.SendPTOInitial, ackhandler.SendPTOHandshake, ackhandler.SendPTOAppData: + if err := s.sendProbePacket(sendMode, now); err != nil { return err } if s.sendQueue.WouldBlock() { @@ -1856,12 +2110,31 @@ func (s *connection) triggerSending(now time.Time) error { } func (s *connection) sendPackets(now time.Time) error { + if s.perspective == protocol.PerspectiveClient && s.handshakeConfirmed { + if pm := s.pathManagerOutgoing.Load(); pm != nil { + connID, frame, tr, ok := pm.NextPathToProbe() + if ok { + probe, buf, err := s.packer.PackPathProbePacket(connID, []ackhandler.Frame{frame}, s.version) + if err != nil { + return err + } + s.logger.Debugf("sending path probe packet from %s", s.LocalAddr()) + s.logShortHeaderPacket(probe.DestConnID, probe.Ack, probe.Frames, probe.StreamFrames, probe.PacketNumber, probe.PacketNumberLen, probe.KeyPhase, protocol.ECNNon, buf.Len(), false) + s.registerPackedShortHeaderPacket(probe, protocol.ECNNon, now) + tr.WriteTo(buf.Data, s.conn.RemoteAddr()) + // There's (likely) more data to send. Loop around again. + s.scheduleSending() + return nil + } + } + } + // Path MTU Discovery // Can't use GSO, since we need to send a single packet that's larger than our current maximum size. // Performance-wise, this doesn't matter, since we only send a very small (<10) number of // MTU probe packets per connection. if s.handshakeConfirmed && s.mtuDiscoverer != nil && s.mtuDiscoverer.ShouldSendProbe(now) { - ping, size := s.mtuDiscoverer.GetPing() + ping, size := s.mtuDiscoverer.GetPing(now) p, buf, err := s.packer.PackMTUProbePacket(ping, size, s.version) if err != nil { return err @@ -1870,21 +2143,20 @@ func (s *connection) sendPackets(now time.Time) error { s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false) s.registerPackedShortHeaderPacket(p, ecn, now) s.sendQueue.Send(buf, 0, ecn) - // This is kind of a hack. We need to trigger sending again somehow. - s.pacingDeadline = deadlineSendImmediately + // There's (likely) more data to send. Loop around again. 
+ s.scheduleSending() return nil } - if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked { - s.framer.QueueControlFrame(&wire.DataBlockedFrame{MaximumData: offset}) + if offset := s.connFlowController.GetWindowUpdate(now); offset > 0 { + s.framer.QueueControlFrame(&wire.MaxDataFrame{MaximumData: offset}) } - s.windowUpdateQueue.QueueAll() if cf := s.cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize); cf != nil { s.queueControlFrame(cf) } if !s.handshakeConfirmed { - packet, err := s.packer.PackCoalescedPacket(false, s.maxPacketSize(), s.version) + packet, err := s.packer.PackCoalescedPacket(false, s.maxPacketSize(), now, s.version) if err != nil || packet == nil { return err } @@ -1892,10 +2164,11 @@ func (s *connection) sendPackets(now time.Time) error { if err := s.sendPackedCoalescedPacket(packet, s.sentPacketHandler.ECNMode(packet.IsOnlyShortHeaderPacket()), now); err != nil { return err } - sendMode := s.sentPacketHandler.SendMode(now) - if sendMode == ackhandler.SendPacingLimited { + //nolint:exhaustive // only need to handle pacing-related events here + switch s.sentPacketHandler.SendMode(now) { + case ackhandler.SendPacingLimited: s.resetPacingDeadline() - } else if sendMode == ackhandler.SendAny { + case ackhandler.SendAny: s.pacingDeadline = deadlineSendImmediately } return nil @@ -1933,7 +2206,10 @@ func (s *connection) sendPacketsWithoutGSO(now time.Time) error { return nil } // Prioritize receiving of packets over sending out more packets. - if len(s.receivedPackets) > 0 { + s.receivedPacketMx.Lock() + hasPackets := !s.receivedPackets.Empty() + s.receivedPacketMx.Unlock() + if hasPackets { s.pacingDeadline = deadlineSendImmediately return nil } @@ -1991,11 +2267,15 @@ func (s *connection) sendPacketsWithGSO(now time.Time) error { } // Prioritize receiving of packets over sending out more packets. - if len(s.receivedPackets) > 0 { + s.receivedPacketMx.Lock() + hasPackets := !s.receivedPackets.Empty() + s.receivedPacketMx.Unlock() + if hasPackets { s.pacingDeadline = deadlineSendImmediately return nil } + ecn = nextECN buf = getLargePacketBuffer() } } @@ -2011,7 +2291,7 @@ func (s *connection) resetPacingDeadline() { func (s *connection) maybeSendAckOnlyPacket(now time.Time) error { if !s.handshakeConfirmed { ecn := s.sentPacketHandler.ECNMode(false) - packet, err := s.packer.PackCoalescedPacket(true, s.maxPacketSize(), s.version) + packet, err := s.packer.PackCoalescedPacket(true, s.maxPacketSize(), now, s.version) if err != nil { return err } @@ -2022,7 +2302,7 @@ func (s *connection) maybeSendAckOnlyPacket(now time.Time) error { } ecn := s.sentPacketHandler.ECNMode(true) - p, buf, err := s.packer.PackAckOnlyPacket(s.maxPacketSize(), s.version) + p, buf, err := s.packer.PackAckOnlyPacket(s.maxPacketSize(), now, s.version) if err != nil { if err == errNothingToPack { return nil @@ -2035,33 +2315,41 @@ func (s *connection) maybeSendAckOnlyPacket(now time.Time) error { return nil } -func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time.Time) error { +func (s *connection) sendProbePacket(sendMode ackhandler.SendMode, now time.Time) error { + var encLevel protocol.EncryptionLevel + //nolint:exhaustive // We only need to handle the PTO send modes here. 
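// --- Editor's note: illustrative sketch, not part of the diff. ---
// The separate window-update queue is gone: when sending, the connection asks
// the flow controller for a window update and, if there is one, queues a
// MAX_DATA frame directly on the framer. Hypothetical, simplified types; the
// "grant more credit once half the window is consumed" rule is an assumption
// for illustration only.
package main

import "fmt"

type flowController struct {
	consumed, windowEnd, windowSize uint64
}

// getWindowUpdate returns the new window offset, or 0 if no update is needed.
func (fc *flowController) getWindowUpdate() uint64 {
	if fc.windowEnd-fc.consumed > fc.windowSize/2 {
		return 0 // plenty of credit left
	}
	fc.windowEnd = fc.consumed + fc.windowSize
	return fc.windowEnd
}

type maxDataFrame struct{ maximumData uint64 }

func main() {
	fc := &flowController{consumed: 900, windowEnd: 1000, windowSize: 1000}
	var queued []maxDataFrame
	if offset := fc.getWindowUpdate(); offset > 0 {
		queued = append(queued, maxDataFrame{maximumData: offset})
	}
	fmt.Println(queued) // [{1900}]
}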
+ switch sendMode { + case ackhandler.SendPTOInitial: + encLevel = protocol.EncryptionInitial + case ackhandler.SendPTOHandshake: + encLevel = protocol.EncryptionHandshake + case ackhandler.SendPTOAppData: + encLevel = protocol.Encryption1RTT + default: + return fmt.Errorf("connection BUG: unexpected send mode: %d", sendMode) + } // Queue probe packets until we actually send out a packet, // or until there are no more packets to queue. var packet *coalescedPacket - for { + for packet == nil { if wasQueued := s.sentPacketHandler.QueueProbePacket(encLevel); !wasQueued { break } var err error - packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version) + packet, err = s.packer.PackPTOProbePacket(encLevel, s.maxPacketSize(), false, now, s.version) if err != nil { return err } - if packet != nil { - break - } } if packet == nil { - s.retransmissionQueue.AddPing(encLevel) var err error - packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version) + packet, err = s.packer.PackPTOProbePacket(encLevel, s.maxPacketSize(), true, now, s.version) if err != nil { return err } } if packet == nil || (len(packet.longHdrPackets) == 0 && packet.shortHdrPacket == nil) { - return fmt.Errorf("connection BUG: couldn't pack %s probe packet", encLevel) + return fmt.Errorf("connection BUG: couldn't pack %s probe packet: %v", encLevel, packet) } return s.sendPackedCoalescedPacket(packet, s.sentPacketHandler.ECNMode(packet.IsOnlyShortHeaderPacket()), now) } @@ -2070,7 +2358,7 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time // If there was nothing to pack, the returned size is 0. func (s *connection) appendOneShortHeaderPacket(buf *packetBuffer, maxSize protocol.ByteCount, ecn protocol.ECN, now time.Time) (protocol.ByteCount, error) { startLen := buf.Len() - p, err := s.packer.AppendPacket(buf, maxSize, s.version) + p, err := s.packer.AppendPacket(buf, maxSize, now, s.version) if err != nil { return 0, err } @@ -2081,6 +2369,21 @@ func (s *connection) appendOneShortHeaderPacket(buf *packetBuffer, maxSize proto } func (s *connection) registerPackedShortHeaderPacket(p shortHeaderPacket, ecn protocol.ECN, now time.Time) { + if p.IsPathProbePacket { + s.sentPacketHandler.SentPacket( + now, + p.PacketNumber, + protocol.InvalidPacketNumber, + p.StreamFrames, + p.Frames, + protocol.Encryption1RTT, + ecn, + p.Length, + p.IsPathMTUProbePacket, + true, + ) + return + } if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && (len(p.StreamFrames) > 0 || ackhandler.HasAckElicitingFrames(p.Frames)) { s.firstAckElicitingPacketAfterIdleSentTime = now } @@ -2089,7 +2392,18 @@ func (s *connection) registerPackedShortHeaderPacket(p shortHeaderPacket, ecn pr if p.Ack != nil { largestAcked = p.Ack.LargestAcked() } - s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, ecn, p.Length, p.IsPathMTUProbePacket) + s.sentPacketHandler.SentPacket( + now, + p.PacketNumber, + largestAcked, + p.StreamFrames, + p.Frames, + protocol.Encryption1RTT, + ecn, + p.Length, + p.IsPathMTUProbePacket, + false, + ) s.connIDManager.SentPacket() } @@ -2103,12 +2417,23 @@ func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, ecn prot if p.ack != nil { largestAcked = p.ack.LargestAcked() } - s.sentPacketHandler.SentPacket(now, p.header.PacketNumber, largestAcked, p.streamFrames, p.frames, p.EncryptionLevel(), ecn, p.length, false) + s.sentPacketHandler.SentPacket( + now, + p.header.PacketNumber, + 
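// --- Editor's note: illustrative sketch, not part of the diff. ---
// On a PTO the connection first tries to retransmit existing ack-eliciting
// data; only if nothing can be queued does it pack a probe that carries just a
// PING. Hypothetical types showing the same "retransmit first, PING as a last
// resort" control flow.
package main

import "fmt"

func pack(frame string) string { return "probe{" + frame + "}" }

type probeSender struct{ retransmittable []string }

// queueProbeData moves one pending ack-eliciting frame into the probe, if any.
func (p *probeSender) queueProbeData() (string, bool) {
	if len(p.retransmittable) == 0 {
		return "", false
	}
	f := p.retransmittable[0]
	p.retransmittable = p.retransmittable[1:]
	return f, true
}

func (p *probeSender) sendProbe() string {
	var packet string
	for packet == "" {
		frame, ok := p.queueProbeData()
		if !ok {
			break
		}
		packet = pack(frame)
	}
	if packet == "" {
		// nothing left to retransmit: force a probe that carries only a PING
		packet = pack("PING")
	}
	return packet
}

func main() {
	fmt.Println((&probeSender{retransmittable: []string{"CRYPTO"}}).sendProbe()) // probe{CRYPTO}
	fmt.Println((&probeSender{}).sendProbe())                                    // probe{PING}
}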
largestAcked, + p.streamFrames, + p.frames, + p.EncryptionLevel(), + ecn, + p.length, + false, + false, + ) if s.perspective == protocol.PerspectiveClient && p.EncryptionLevel() == protocol.EncryptionHandshake && !s.droppedInitialKeys { // On the client side, Initial keys are dropped as soon as the first Handshake packet is sent. // See Section 4.9.1 of RFC 9001. - if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil { + if err := s.dropEncryptionLevel(protocol.EncryptionInitial, now); err != nil { return err } } @@ -2121,7 +2446,18 @@ func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, ecn prot if p.Ack != nil { largestAcked = p.Ack.LargestAcked() } - s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, ecn, p.Length, p.IsPathMTUProbePacket) + s.sentPacketHandler.SentPacket( + now, + p.PacketNumber, + largestAcked, + p.StreamFrames, + p.Frames, + protocol.Encryption1RTT, + ecn, + p.Length, + p.IsPathMTUProbePacket, + false, + ) } s.connIDManager.SentPacket() s.sendQueue.Send(packet.buffer, 0, ecn) @@ -2169,128 +2505,6 @@ func (s *connection) maxPacketSize() protocol.ByteCount { return s.mtuDiscoverer.CurrentSize() } -func (s *connection) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN) { - // quic-go logging - if s.logger.Debug() { - p.header.Log(s.logger) - if p.ack != nil { - wire.LogFrame(s.logger, p.ack, true) - } - for _, frame := range p.frames { - wire.LogFrame(s.logger, frame.Frame, true) - } - for _, frame := range p.streamFrames { - wire.LogFrame(s.logger, frame.Frame, true) - } - } - - // tracing - if s.tracer != nil && s.tracer.SentLongHeaderPacket != nil { - frames := make([]logging.Frame, 0, len(p.frames)) - for _, f := range p.frames { - frames = append(frames, logutils.ConvertFrame(f.Frame)) - } - for _, f := range p.streamFrames { - frames = append(frames, logutils.ConvertFrame(f.Frame)) - } - var ack *logging.AckFrame - if p.ack != nil { - ack = logutils.ConvertAckFrame(p.ack) - } - s.tracer.SentLongHeaderPacket(p.header, p.length, ecn, ack, frames) - } -} - -func (s *connection) logShortHeaderPacket( - destConnID protocol.ConnectionID, - ackFrame *wire.AckFrame, - frames []ackhandler.Frame, - streamFrames []ackhandler.StreamFrame, - pn protocol.PacketNumber, - pnLen protocol.PacketNumberLen, - kp protocol.KeyPhaseBit, - ecn protocol.ECN, - size protocol.ByteCount, - isCoalesced bool, -) { - if s.logger.Debug() && !isCoalesced { - s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT (ECN: %s)", pn, size, s.logID, ecn) - } - // quic-go logging - if s.logger.Debug() { - wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp) - if ackFrame != nil { - wire.LogFrame(s.logger, ackFrame, true) - } - for _, f := range frames { - wire.LogFrame(s.logger, f.Frame, true) - } - for _, f := range streamFrames { - wire.LogFrame(s.logger, f.Frame, true) - } - } - - // tracing - if s.tracer != nil && s.tracer.SentShortHeaderPacket != nil { - fs := make([]logging.Frame, 0, len(frames)+len(streamFrames)) - for _, f := range frames { - fs = append(fs, logutils.ConvertFrame(f.Frame)) - } - for _, f := range streamFrames { - fs = append(fs, logutils.ConvertFrame(f.Frame)) - } - var ack *logging.AckFrame - if ackFrame != nil { - ack = logutils.ConvertAckFrame(ackFrame) - } - s.tracer.SentShortHeaderPacket( - &logging.ShortHeader{ - DestConnectionID: destConnID, - PacketNumber: pn, - PacketNumberLen: pnLen, - KeyPhase: kp, - }, - size, - ecn, - ack, - fs, - ) - 
} -} - -func (s *connection) logCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN) { - if s.logger.Debug() { - // There's a short period between dropping both Initial and Handshake keys and completion of the handshake, - // during which we might call PackCoalescedPacket but just pack a short header packet. - if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil { - s.logShortHeaderPacket( - packet.shortHdrPacket.DestConnID, - packet.shortHdrPacket.Ack, - packet.shortHdrPacket.Frames, - packet.shortHdrPacket.StreamFrames, - packet.shortHdrPacket.PacketNumber, - packet.shortHdrPacket.PacketNumberLen, - packet.shortHdrPacket.KeyPhase, - ecn, - packet.shortHdrPacket.Length, - false, - ) - return - } - if len(packet.longHdrPackets) > 1 { - s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID) - } else { - s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel()) - } - } - for _, p := range packet.longHdrPackets { - s.logLongHeaderPacket(p, ecn) - } - if p := packet.shortHdrPacket; p != nil { - s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, p.Length, true) - } -} - // AcceptStream returns the next stream openend by the peer func (s *connection) AcceptStream(ctx context.Context) (Stream, error) { return s.streamsMap.AcceptStream(ctx) @@ -2332,7 +2546,6 @@ func (s *connection) newFlowController(id protocol.StreamID) flowcontrol.StreamF protocol.ByteCount(s.config.InitialStreamReceiveWindow), protocol.ByteCount(s.config.MaxStreamReceiveWindow), initialSendWindow, - s.onHasStreamWindowUpdate, s.rttStats, s.logger, ) @@ -2371,18 +2584,15 @@ func (s *connection) queueControlFrame(f wire.Frame) { s.scheduleSending() } -func (s *connection) onHasStreamWindowUpdate(id protocol.StreamID) { - s.windowUpdateQueue.AddStream(id) +func (s *connection) onHasConnectionData() { s.scheduleSending() } + +func (s *connection) onHasStreamData(id protocol.StreamID, str sendStreamI) { + s.framer.AddActiveStream(id, str) s.scheduleSending() } -func (s *connection) onHasConnectionWindowUpdate() { - s.windowUpdateQueue.AddConnection() - s.scheduleSending() -} - -func (s *connection) onHasStreamData(id protocol.StreamID) { - s.framer.AddActiveStream(id) +func (s *connection) onHasStreamControlFrame(id protocol.StreamID, str streamControlFrameGetter) { + s.framer.AddStreamWithControlFrames(id, str) s.scheduleSending() } @@ -2390,11 +2600,7 @@ func (s *connection) onStreamCompleted(id protocol.StreamID) { if err := s.streamsMap.DeleteStream(id); err != nil { s.closeLocal(err) } -} - -func (s *connection) onMTUIncreased(mtu protocol.ByteCount) { - s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(mtu))) - s.sentPacketHandler.SetMaxDatagramSize(mtu) + s.framer.RemoveActiveStream(id) } func (s *connection) SendDatagram(p []byte) error { @@ -2407,7 +2613,7 @@ func (s *connection) SendDatagram(p []byte) error { // Under many circumstances we could send a few more bytes. 
maxDataLen := min( f.MaxDataLen(s.peerParams.MaxDatagramFrameSize, s.version), - protocol.ByteCount(s.maxPayloadSizeEstimate.Load()), + protocol.ByteCount(s.currentMTUEstimate.Load()), ) if protocol.ByteCount(len(p)) > maxDataLen { return &DatagramTooLargeError{MaxDatagramPayloadSize: int64(maxDataLen)} @@ -2424,16 +2630,48 @@ func (s *connection) ReceiveDatagram(ctx context.Context) ([]byte, error) { return s.datagramQueue.Receive(ctx) } -func (s *connection) LocalAddr() net.Addr { - return s.conn.LocalAddr() +func (s *connection) LocalAddr() net.Addr { return s.conn.LocalAddr() } +func (s *connection) RemoteAddr() net.Addr { return s.conn.RemoteAddr() } + +func (s *connection) getPathManager() *pathManagerOutgoing { + s.pathManagerOutgoing.CompareAndSwap(nil, + func() *pathManagerOutgoing { // this function is only called if a swap is performed + return newPathManagerOutgoing( + s.connIDManager.GetConnIDForPath, + s.connIDManager.RetireConnIDForPath, + s.scheduleSending, + ) + }(), + ) + return s.pathManagerOutgoing.Load() } -func (s *connection) RemoteAddr() net.Addr { - return s.conn.RemoteAddr() -} - -func (s *connection) GetVersion() protocol.Version { - return s.version +func (s *connection) AddPath(t *Transport) (*Path, error) { + if s.perspective == protocol.PerspectiveServer { + return nil, errors.New("server cannot initiate connection migration") + } + if s.peerParams.DisableActiveMigration { + return nil, errors.New("server disabled connection migration") + } + if err := t.init(false); err != nil { + return nil, err + } + return s.getPathManager().NewPath( + t, + 200*time.Millisecond, // initial RTT estimate + func() { + runner := t.connRunner() + s.connIDGenerator.AddConnRunner( + t.id(), + connRunnerCallbacks{ + AddConnectionID: func(connID protocol.ConnectionID) { runner.Add(connID, s) }, + RemoveConnectionID: runner.Remove, + RetireConnectionID: runner.Retire, + ReplaceWithClosed: runner.ReplaceWithClosed, + }, + ) + }, + ), nil } func (s *connection) NextConnection(ctx context.Context) (Connection, error) { diff --git a/vendor/github.com/quic-go/quic-go/connection_logging.go b/vendor/github.com/quic-go/quic-go/connection_logging.go new file mode 100644 index 00000000..a314a6cd --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/connection_logging.go @@ -0,0 +1,168 @@ +package quic + +import ( + "slices" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" +) + +// ConvertFrame converts a wire.Frame into a logging.Frame. +// This makes it possible for external packages to access the frames. +// Furthermore, it removes the data slices from CRYPTO and STREAM frames. +func toLoggingFrame(frame wire.Frame) logging.Frame { + switch f := frame.(type) { + case *wire.AckFrame: + // We use a pool for ACK frames. + // Implementations of the tracer interface may hold on to frames, so we need to make a copy here. 
+ return toLoggingAckFrame(f) + case *wire.CryptoFrame: + return &logging.CryptoFrame{ + Offset: f.Offset, + Length: protocol.ByteCount(len(f.Data)), + } + case *wire.StreamFrame: + return &logging.StreamFrame{ + StreamID: f.StreamID, + Offset: f.Offset, + Length: f.DataLen(), + Fin: f.Fin, + } + case *wire.DatagramFrame: + return &logging.DatagramFrame{ + Length: logging.ByteCount(len(f.Data)), + } + default: + return logging.Frame(frame) + } +} + +func toLoggingAckFrame(f *wire.AckFrame) *logging.AckFrame { + ack := &logging.AckFrame{ + AckRanges: slices.Clone(f.AckRanges), + DelayTime: f.DelayTime, + ECNCE: f.ECNCE, + ECT0: f.ECT0, + ECT1: f.ECT1, + } + return ack +} + +func (s *connection) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN) { + // quic-go logging + if s.logger.Debug() { + p.header.Log(s.logger) + if p.ack != nil { + wire.LogFrame(s.logger, p.ack, true) + } + for _, frame := range p.frames { + wire.LogFrame(s.logger, frame.Frame, true) + } + for _, frame := range p.streamFrames { + wire.LogFrame(s.logger, frame.Frame, true) + } + } + + // tracing + if s.tracer != nil && s.tracer.SentLongHeaderPacket != nil { + frames := make([]logging.Frame, 0, len(p.frames)) + for _, f := range p.frames { + frames = append(frames, toLoggingFrame(f.Frame)) + } + for _, f := range p.streamFrames { + frames = append(frames, toLoggingFrame(f.Frame)) + } + var ack *logging.AckFrame + if p.ack != nil { + ack = toLoggingAckFrame(p.ack) + } + s.tracer.SentLongHeaderPacket(p.header, p.length, ecn, ack, frames) + } +} + +func (s *connection) logShortHeaderPacket( + destConnID protocol.ConnectionID, + ackFrame *wire.AckFrame, + frames []ackhandler.Frame, + streamFrames []ackhandler.StreamFrame, + pn protocol.PacketNumber, + pnLen protocol.PacketNumberLen, + kp protocol.KeyPhaseBit, + ecn protocol.ECN, + size protocol.ByteCount, + isCoalesced bool, +) { + if s.logger.Debug() && !isCoalesced { + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT (ECN: %s)", pn, size, s.logID, ecn) + } + // quic-go logging + if s.logger.Debug() { + wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp) + if ackFrame != nil { + wire.LogFrame(s.logger, ackFrame, true) + } + for _, f := range frames { + wire.LogFrame(s.logger, f.Frame, true) + } + for _, f := range streamFrames { + wire.LogFrame(s.logger, f.Frame, true) + } + } + + // tracing + if s.tracer != nil && s.tracer.SentShortHeaderPacket != nil { + fs := make([]logging.Frame, 0, len(frames)+len(streamFrames)) + for _, f := range frames { + fs = append(fs, toLoggingFrame(f.Frame)) + } + for _, f := range streamFrames { + fs = append(fs, toLoggingFrame(f.Frame)) + } + var ack *logging.AckFrame + if ackFrame != nil { + ack = toLoggingAckFrame(ackFrame) + } + s.tracer.SentShortHeaderPacket( + &logging.ShortHeader{DestConnectionID: destConnID, PacketNumber: pn, PacketNumberLen: pnLen, KeyPhase: kp}, + size, + ecn, + ack, + fs, + ) + } +} + +func (s *connection) logCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN) { + if s.logger.Debug() { + // There's a short period between dropping both Initial and Handshake keys and completion of the handshake, + // during which we might call PackCoalescedPacket but just pack a short header packet. 
+ if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil { + s.logShortHeaderPacket( + packet.shortHdrPacket.DestConnID, + packet.shortHdrPacket.Ack, + packet.shortHdrPacket.Frames, + packet.shortHdrPacket.StreamFrames, + packet.shortHdrPacket.PacketNumber, + packet.shortHdrPacket.PacketNumberLen, + packet.shortHdrPacket.KeyPhase, + ecn, + packet.shortHdrPacket.Length, + false, + ) + return + } + if len(packet.longHdrPackets) > 1 { + s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID) + } else { + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel()) + } + } + for _, p := range packet.longHdrPackets { + s.logLongHeaderPacket(p, ecn) + } + if p := packet.shortHdrPacket; p != nil { + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, p.Length, true) + } +} diff --git a/vendor/github.com/quic-go/quic-go/crypto_stream.go b/vendor/github.com/quic-go/quic-go/crypto_stream.go index abc7ddcf..9a387baa 100644 --- a/vendor/github.com/quic-go/quic-go/crypto_stream.go +++ b/vendor/github.com/quic-go/quic-go/crypto_stream.go @@ -2,27 +2,14 @@ package quic import ( "fmt" - "io" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" "github.com/quic-go/quic-go/internal/wire" ) -type cryptoStream interface { - // for receiving data - HandleCryptoFrame(*wire.CryptoFrame) error - GetCryptoData() []byte - Finish() error - // for sending data - io.Writer - HasData() bool - PopCryptoFrame(protocol.ByteCount) *wire.CryptoFrame -} - -type cryptoStreamImpl struct { - queue *frameSorter - msgBuf []byte +type cryptoStream struct { + queue frameSorter highestOffset protocol.ByteCount finished bool @@ -31,11 +18,11 @@ type cryptoStreamImpl struct { writeBuf []byte } -func newCryptoStream() cryptoStream { - return &cryptoStreamImpl{queue: newFrameSorter()} +func newCryptoStream() *cryptoStream { + return &cryptoStream{queue: *newFrameSorter()} } -func (s *cryptoStreamImpl) HandleCryptoFrame(f *wire.CryptoFrame) error { +func (s *cryptoStream) HandleCryptoFrame(f *wire.CryptoFrame) error { highestOffset := f.Offset + protocol.ByteCount(len(f.Data)) if maxOffset := highestOffset; maxOffset > protocol.MaxCryptoStreamOffset { return &qerr.TransportError{ @@ -56,26 +43,16 @@ func (s *cryptoStreamImpl) HandleCryptoFrame(f *wire.CryptoFrame) error { return nil } s.highestOffset = max(s.highestOffset, highestOffset) - if err := s.queue.Push(f.Data, f.Offset, nil); err != nil { - return err - } - for { - _, data, _ := s.queue.Pop() - if data == nil { - return nil - } - s.msgBuf = append(s.msgBuf, data...) 
- } + return s.queue.Push(f.Data, f.Offset, nil) } // GetCryptoData retrieves data that was received in CRYPTO frames -func (s *cryptoStreamImpl) GetCryptoData() []byte { - b := s.msgBuf - s.msgBuf = nil - return b +func (s *cryptoStream) GetCryptoData() []byte { + _, data, _ := s.queue.Pop() + return data } -func (s *cryptoStreamImpl) Finish() error { +func (s *cryptoStream) Finish() error { if s.queue.HasMoreData() { return &qerr.TransportError{ ErrorCode: qerr.ProtocolViolation, @@ -87,16 +64,16 @@ func (s *cryptoStreamImpl) Finish() error { } // Writes writes data that should be sent out in CRYPTO frames -func (s *cryptoStreamImpl) Write(p []byte) (int, error) { +func (s *cryptoStream) Write(p []byte) (int, error) { s.writeBuf = append(s.writeBuf, p...) return len(p), nil } -func (s *cryptoStreamImpl) HasData() bool { +func (s *cryptoStream) HasData() bool { return len(s.writeBuf) > 0 } -func (s *cryptoStreamImpl) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame { +func (s *cryptoStream) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame { f := &wire.CryptoFrame{Offset: s.writeOffset} n := min(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf))) f.Data = s.writeBuf[:n] diff --git a/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go index c48e238a..d70b9b00 100644 --- a/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go +++ b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go @@ -3,32 +3,22 @@ package quic import ( "fmt" - "github.com/quic-go/quic-go/internal/handshake" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/wire" ) -type cryptoDataHandler interface { - HandleMessage([]byte, protocol.EncryptionLevel) error - NextEvent() handshake.Event -} - type cryptoStreamManager struct { - cryptoHandler cryptoDataHandler - - initialStream cryptoStream - handshakeStream cryptoStream - oneRTTStream cryptoStream + initialStream *cryptoStream + handshakeStream *cryptoStream + oneRTTStream *cryptoStream } func newCryptoStreamManager( - cryptoHandler cryptoDataHandler, - initialStream cryptoStream, - handshakeStream cryptoStream, - oneRTTStream cryptoStream, + initialStream *cryptoStream, + handshakeStream *cryptoStream, + oneRTTStream *cryptoStream, ) *cryptoStreamManager { return &cryptoStreamManager{ - cryptoHandler: cryptoHandler, initialStream: initialStream, handshakeStream: handshakeStream, oneRTTStream: oneRTTStream, @@ -36,7 +26,7 @@ func newCryptoStreamManager( } func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error { - var str cryptoStream + var str *cryptoStream //nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets. switch encLevel { case protocol.EncryptionInitial: @@ -48,18 +38,23 @@ func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLeve default: return fmt.Errorf("received CRYPTO frame with unexpected encryption level: %s", encLevel) } - if err := str.HandleCryptoFrame(frame); err != nil { - return err - } - for { - data := str.GetCryptoData() - if data == nil { - return nil - } - if err := m.cryptoHandler.HandleMessage(data, encLevel); err != nil { - return err - } + return str.HandleCryptoFrame(frame) +} + +func (m *cryptoStreamManager) GetCryptoData(encLevel protocol.EncryptionLevel) []byte { + var str *cryptoStream + //nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets. 
+ switch encLevel { + case protocol.EncryptionInitial: + str = m.initialStream + case protocol.EncryptionHandshake: + str = m.handshakeStream + case protocol.Encryption1RTT: + str = m.oneRTTStream + default: + panic(fmt.Sprintf("received CRYPTO frame with unexpected encryption level: %s", encLevel)) } + return str.GetCryptoData() } func (m *cryptoStreamManager) GetPostHandshakeData(maxSize protocol.ByteCount) *wire.CryptoFrame { diff --git a/vendor/github.com/quic-go/quic-go/errors.go b/vendor/github.com/quic-go/quic-go/errors.go index 3fe1e0a9..4a69a7f1 100644 --- a/vendor/github.com/quic-go/quic-go/errors.go +++ b/vendor/github.com/quic-go/quic-go/errors.go @@ -50,8 +50,8 @@ type StreamError struct { } func (e *StreamError) Is(target error) bool { - _, ok := target.(*StreamError) - return ok + t, ok := target.(*StreamError) + return ok && e.StreamID == t.StreamID && e.ErrorCode == t.ErrorCode && e.Remote == t.Remote } func (e *StreamError) Error() string { @@ -68,8 +68,8 @@ type DatagramTooLargeError struct { } func (e *DatagramTooLargeError) Is(target error) bool { - _, ok := target.(*DatagramTooLargeError) - return ok + t, ok := target.(*DatagramTooLargeError) + return ok && e.MaxDatagramPayloadSize == t.MaxDatagramPayloadSize } func (e *DatagramTooLargeError) Error() string { return "DATAGRAM frame too large" } diff --git a/vendor/github.com/quic-go/quic-go/framer.go b/vendor/github.com/quic-go/quic-go/framer.go index 5eef6537..fee31631 100644 --- a/vendor/github.com/quic-go/quic-go/framer.go +++ b/vendor/github.com/quic-go/quic-go/framer.go @@ -1,64 +1,54 @@ package quic import ( - "errors" + "slices" "sync" + "time" "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/flowcontrol" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/utils/ringbuffer" "github.com/quic-go/quic-go/internal/wire" "github.com/quic-go/quic-go/quicvarint" ) -type framer interface { - HasData() bool - - QueueControlFrame(wire.Frame) - AppendControlFrames([]ackhandler.Frame, protocol.ByteCount, protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) - - AddActiveStream(protocol.StreamID) - AppendStreamFrames([]ackhandler.StreamFrame, protocol.ByteCount, protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) - - Handle0RTTRejection() error - - // QueuedTooManyControlFrames says if the control frame queue exceeded its maximum queue length. - // This is a hack. - // It is easier to implement than propagating an error return value in QueueControlFrame. - // The correct solution would be to queue frames with their respective structs. - // See https://github.com/quic-go/quic-go/issues/4271 for the queueing of stream-related control frames. - QueuedTooManyControlFrames() bool -} - const ( maxPathResponses = 256 maxControlFrames = 16 << 10 ) -type framerI struct { +// This is the largest possible size of a stream-related control frame +// (which is the RESET_STREAM frame). 
+const maxStreamControlFrameSize = 25 + +type streamControlFrameGetter interface { + getControlFrame(time.Time) (_ ackhandler.Frame, ok, hasMore bool) +} + +type framer struct { mutex sync.Mutex - streamGetter streamGetter - - activeStreams map[protocol.StreamID]struct{} - streamQueue ringbuffer.RingBuffer[protocol.StreamID] + activeStreams map[protocol.StreamID]sendStreamI + streamQueue ringbuffer.RingBuffer[protocol.StreamID] + streamsWithControlFrames map[protocol.StreamID]streamControlFrameGetter controlFrameMutex sync.Mutex controlFrames []wire.Frame pathResponses []*wire.PathResponseFrame + connFlowController flowcontrol.ConnectionFlowController queuedTooManyControlFrames bool } -var _ framer = &framerI{} - -func newFramer(streamGetter streamGetter) framer { - return &framerI{ - streamGetter: streamGetter, - activeStreams: make(map[protocol.StreamID]struct{}), +func newFramer(connFlowController flowcontrol.ConnectionFlowController) *framer { + return &framer{ + activeStreams: make(map[protocol.StreamID]sendStreamI), + streamsWithControlFrames: make(map[protocol.StreamID]streamControlFrameGetter), + connFlowController: connFlowController, } } -func (f *framerI) HasData() bool { +func (f *framer) HasData() bool { f.mutex.Lock() hasData := !f.streamQueue.Empty() f.mutex.Unlock() @@ -67,10 +57,10 @@ func (f *framerI) HasData() bool { } f.controlFrameMutex.Lock() defer f.controlFrameMutex.Unlock() - return len(f.controlFrames) > 0 || len(f.pathResponses) > 0 + return len(f.streamsWithControlFrames) > 0 || len(f.controlFrames) > 0 || len(f.pathResponses) > 0 } -func (f *framerI) QueueControlFrame(frame wire.Frame) { +func (f *framer) QueueControlFrame(frame wire.Frame) { f.controlFrameMutex.Lock() defer f.controlFrameMutex.Unlock() @@ -92,10 +82,80 @@ func (f *framerI) QueueControlFrame(frame wire.Frame) { f.controlFrames = append(f.controlFrames, frame) } -func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) { +func (f *framer) Append( + frames []ackhandler.Frame, + streamFrames []ackhandler.StreamFrame, + maxLen protocol.ByteCount, + now time.Time, + v protocol.Version, +) ([]ackhandler.Frame, []ackhandler.StreamFrame, protocol.ByteCount) { f.controlFrameMutex.Lock() - defer f.controlFrameMutex.Unlock() + frames, controlFrameLen := f.appendControlFrames(frames, maxLen, now, v) + maxLen -= controlFrameLen + var lastFrame ackhandler.StreamFrame + var streamFrameLen protocol.ByteCount + f.mutex.Lock() + // pop STREAM frames, until less than 128 bytes are left in the packet + numActiveStreams := f.streamQueue.Len() + for i := 0; i < numActiveStreams; i++ { + if protocol.MinStreamFrameSize > maxLen { + break + } + sf, blocked := f.getNextStreamFrame(maxLen, v) + if sf.Frame != nil { + streamFrames = append(streamFrames, sf) + maxLen -= sf.Frame.Length(v) + lastFrame = sf + streamFrameLen += sf.Frame.Length(v) + } + // If the stream just became blocked on stream flow control, attempt to pack the + // STREAM_DATA_BLOCKED into the same packet. + if blocked != nil { + l := blocked.Length(v) + // In case it doesn't fit, queue it for the next packet. + if maxLen < l { + f.controlFrames = append(f.controlFrames, blocked) + break + } + frames = append(frames, ackhandler.Frame{Frame: blocked}) + maxLen -= l + controlFrameLen += l + } + } + + // The only way to become blocked on connection-level flow control is by sending STREAM frames. 
+ if isBlocked, offset := f.connFlowController.IsNewlyBlocked(); isBlocked { + blocked := &wire.DataBlockedFrame{MaximumData: offset} + l := blocked.Length(v) + // In case it doesn't fit, queue it for the next packet. + if maxLen >= l { + frames = append(frames, ackhandler.Frame{Frame: blocked}) + controlFrameLen += l + } else { + f.controlFrames = append(f.controlFrames, blocked) + } + } + + f.mutex.Unlock() + f.controlFrameMutex.Unlock() + + if lastFrame.Frame != nil { + // account for the smaller size of the last STREAM frame + streamFrameLen -= lastFrame.Frame.Length(v) + lastFrame.Frame.DataLenPresent = false + streamFrameLen += lastFrame.Frame.Length(v) + } + + return frames, streamFrames, controlFrameLen + streamFrameLen +} + +func (f *framer) appendControlFrames( + frames []ackhandler.Frame, + maxLen protocol.ByteCount, + now time.Time, + v protocol.Version, +) ([]ackhandler.Frame, protocol.ByteCount) { var length protocol.ByteCount // add a PATH_RESPONSE first, but only pack a single PATH_RESPONSE per packet if len(f.pathResponses) > 0 { @@ -108,6 +168,29 @@ func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol } } + // add stream-related control frames + for id, str := range f.streamsWithControlFrames { + start: + remainingLen := maxLen - length + if remainingLen <= maxStreamControlFrameSize { + break + } + fr, ok, hasMore := str.getControlFrame(now) + if !hasMore { + delete(f.streamsWithControlFrames, id) + } + if !ok { + continue + } + frames = append(frames, fr) + length += fr.Frame.Length(v) + if hasMore { + // It is rare that a stream has more than one control frame to queue. + // We don't want to spawn another loop for just to cover that case. + goto start + } + } + for len(f.controlFrames) > 0 { frame := f.controlFrames[len(f.controlFrames)-1] frameLen := frame.Length(v) @@ -118,76 +201,77 @@ func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol length += frameLen f.controlFrames = f.controlFrames[:len(f.controlFrames)-1] } + return frames, length } -func (f *framerI) QueuedTooManyControlFrames() bool { +// QueuedTooManyControlFrames says if the control frame queue exceeded its maximum queue length. +// This is a hack. +// It is easier to implement than propagating an error return value in QueueControlFrame. +// The correct solution would be to queue frames with their respective structs. +// See https://github.com/quic-go/quic-go/issues/4271 for the queueing of stream-related control frames. +func (f *framer) QueuedTooManyControlFrames() bool { return f.queuedTooManyControlFrames } -func (f *framerI) AddActiveStream(id protocol.StreamID) { +func (f *framer) AddActiveStream(id protocol.StreamID, str sendStreamI) { f.mutex.Lock() if _, ok := f.activeStreams[id]; !ok { f.streamQueue.PushBack(id) - f.activeStreams[id] = struct{}{} + f.activeStreams[id] = str } f.mutex.Unlock() } -func (f *framerI) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) { - startLen := len(frames) - var length protocol.ByteCount +func (f *framer) AddStreamWithControlFrames(id protocol.StreamID, str streamControlFrameGetter) { + f.controlFrameMutex.Lock() + if _, ok := f.streamsWithControlFrames[id]; !ok { + f.streamsWithControlFrames[id] = str + } + f.controlFrameMutex.Unlock() +} + +// RemoveActiveStream is called when a stream completes. 
+func (f *framer) RemoveActiveStream(id protocol.StreamID) { f.mutex.Lock() - // pop STREAM frames, until less than MinStreamFrameSize bytes are left in the packet - numActiveStreams := f.streamQueue.Len() - for i := 0; i < numActiveStreams; i++ { - if protocol.MinStreamFrameSize+length > maxLen { - break - } - id := f.streamQueue.PopFront() - // This should never return an error. Better check it anyway. - // The stream will only be in the streamQueue, if it enqueued itself there. - str, err := f.streamGetter.GetOrOpenSendStream(id) - // The stream can be nil if it completed after it said it had data. - if str == nil || err != nil { - delete(f.activeStreams, id) - continue - } - remainingLen := maxLen - length - // For the last STREAM frame, we'll remove the DataLen field later. - // Therefore, we can pretend to have more bytes available when popping - // the STREAM frame (which will always have the DataLen set). - remainingLen += protocol.ByteCount(quicvarint.Len(uint64(remainingLen))) - frame, ok, hasMoreData := str.popStreamFrame(remainingLen, v) - if hasMoreData { // put the stream back in the queue (at the end) - f.streamQueue.PushBack(id) - } else { // no more data to send. Stream is not active - delete(f.activeStreams, id) - } - // The frame can be "nil" - // * if the receiveStream was canceled after it said it had data - // * the remaining size doesn't allow us to add another STREAM frame - if !ok { - continue - } - frames = append(frames, frame) - length += frame.Frame.Length(v) - } + delete(f.activeStreams, id) + // We don't delete the stream from the streamQueue, + // since we'd have to iterate over the ringbuffer. + // Instead, we check if the stream is still in activeStreams when appending STREAM frames. f.mutex.Unlock() - if len(frames) > startLen { - l := frames[len(frames)-1].Frame.Length(v) - // account for the smaller size of the last STREAM frame - frames[len(frames)-1].Frame.DataLenPresent = false - length += frames[len(frames)-1].Frame.Length(v) - l - } - return frames, length } -func (f *framerI) Handle0RTTRejection() error { +func (f *framer) getNextStreamFrame(maxLen protocol.ByteCount, v protocol.Version) (ackhandler.StreamFrame, *wire.StreamDataBlockedFrame) { + id := f.streamQueue.PopFront() + // This should never return an error. Better check it anyway. + // The stream will only be in the streamQueue, if it enqueued itself there. + str, ok := f.activeStreams[id] + // The stream might have been removed after being enqueued. + if !ok { + return ackhandler.StreamFrame{}, nil + } + // For the last STREAM frame, we'll remove the DataLen field later. + // Therefore, we can pretend to have more bytes available when popping + // the STREAM frame (which will always have the DataLen set). + maxLen += protocol.ByteCount(quicvarint.Len(uint64(maxLen))) + frame, blocked, hasMoreData := str.popStreamFrame(maxLen, v) + if hasMoreData { // put the stream back in the queue (at the end) + f.streamQueue.PushBack(id) + } else { // no more data to send. 
Stream is not active + delete(f.activeStreams, id) + } + // Note that the frame.Frame can be nil: + // * if the stream was canceled after it said it had data + // * the remaining size doesn't allow us to add another STREAM frame + return frame, blocked +} + +func (f *framer) Handle0RTTRejection() { f.mutex.Lock() defer f.mutex.Unlock() - f.controlFrameMutex.Lock() + defer f.controlFrameMutex.Unlock() + f.streamQueue.Clear() for id := range f.activeStreams { delete(f.activeStreams, id) @@ -195,16 +279,13 @@ func (f *framerI) Handle0RTTRejection() error { var j int for i, frame := range f.controlFrames { switch frame.(type) { - case *wire.MaxDataFrame, *wire.MaxStreamDataFrame, *wire.MaxStreamsFrame: - return errors.New("didn't expect MAX_DATA / MAX_STREAM_DATA / MAX_STREAMS frame to be sent in 0-RTT") - case *wire.DataBlockedFrame, *wire.StreamDataBlockedFrame, *wire.StreamsBlockedFrame: + case *wire.MaxDataFrame, *wire.MaxStreamDataFrame, *wire.MaxStreamsFrame, + *wire.DataBlockedFrame, *wire.StreamDataBlockedFrame, *wire.StreamsBlockedFrame: continue default: f.controlFrames[j] = f.controlFrames[i] j++ } } - f.controlFrames = f.controlFrames[:j] - f.controlFrameMutex.Unlock() - return nil + f.controlFrames = slices.Delete(f.controlFrames, j, len(f.controlFrames)) } diff --git a/vendor/github.com/quic-go/quic-go/interface.go b/vendor/github.com/quic-go/quic-go/interface.go index a3e670f0..e144acc6 100644 --- a/vendor/github.com/quic-go/quic-go/interface.go +++ b/vendor/github.com/quic-go/quic-go/interface.go @@ -19,10 +19,6 @@ type StreamID = protocol.StreamID // A Version is a QUIC version number. type Version = protocol.Version -// A VersionNumber is a QUIC version number. -// Deprecated: VersionNumber was renamed to Version. -type VersionNumber = Version - const ( // Version1 is RFC 9000 Version1 = protocol.Version1 @@ -48,31 +44,34 @@ type TokenStore interface { } // Err0RTTRejected is the returned from: -// * Open{Uni}Stream{Sync} -// * Accept{Uni}Stream -// * Stream.Read and Stream.Write +// - Open{Uni}Stream{Sync} +// - Accept{Uni}Stream +// - Stream.Read and Stream.Write +// // when the server rejects a 0-RTT connection attempt. var Err0RTTRejected = errors.New("0-RTT rejected") -// ConnectionTracingKey can be used to associate a ConnectionTracer with a Connection. +// ConnectionTracingKey can be used to associate a [logging.ConnectionTracer] with a [Connection]. // It is set on the Connection.Context() context, // as well as on the context passed to logging.Tracer.NewConnectionTracer. +// // Deprecated: Applications can set their own tracing key using Transport.ConnContext. var ConnectionTracingKey = connTracingCtxKey{} // ConnectionTracingID is the type of the context value saved under the ConnectionTracingKey. +// // Deprecated: Applications can set their own tracing key using Transport.ConnContext. type ConnectionTracingID uint64 type connTracingCtxKey struct{} // QUICVersionContextKey can be used to find out the QUIC version of a TLS handshake from the -// context returned by tls.Config.ClientHelloInfo.Context. +// context returned by tls.Config.ClientInfo.Context. var QUICVersionContextKey = handshake.QUICVersionContextKey -// Stream is the interface implemented by QUIC streams -// In addition to the errors listed on the Connection, -// calls to stream functions can return a StreamError if the stream is canceled. +// Stream is the interface implemented by QUIC streams. 
+// In addition to the errors listed on the [Connection], +// calls to stream functions can return a [StreamError] if the stream is canceled. type Stream interface { ReceiveStream SendStream @@ -87,12 +86,8 @@ type ReceiveStream interface { // StreamID returns the stream ID. StreamID() StreamID // Read reads data from the stream. - // Read can be made to time out and return a net.Error with Timeout() == true - // after a fixed time limit; see SetDeadline and SetReadDeadline. - // If the stream was canceled by the peer, the error implements the StreamError - // interface, and Canceled() == true. - // If the connection was closed due to a timeout, the error satisfies - // the net.Error interface, and Timeout() will be true. + // Read can be made to time out using SetDeadline and SetReadDeadline. + // If the stream was canceled, the error is a StreamError. io.Reader // CancelRead aborts receiving on this stream. // It will ask the peer to stop transmitting stream data. @@ -102,7 +97,6 @@ type ReceiveStream interface { // SetReadDeadline sets the deadline for future Read calls and // any currently-blocked Read call. // A zero value for t means Read will not time out. - SetReadDeadline(t time.Time) error } @@ -111,12 +105,8 @@ type SendStream interface { // StreamID returns the stream ID. StreamID() StreamID // Write writes data to the stream. - // Write can be made to time out and return a net.Error with Timeout() == true - // after a fixed time limit; see SetDeadline and SetWriteDeadline. - // If the stream was canceled by the peer, the error implements the StreamError - // interface, and Canceled() == true. - // If the connection was closed due to a timeout, the error satisfies - // the net.Error interface, and Timeout() will be true. + // Write can be made to time out using SetDeadline and SetWriteDeadline. + // If the stream was canceled, the error is a StreamError. io.Writer // Close closes the write-direction of the stream. // Future calls to Write are not permitted after calling Close. @@ -146,45 +136,42 @@ type SendStream interface { // A Connection is a QUIC connection between two peers. // Calls to the connection (and to streams) can return the following types of errors: -// * ApplicationError: for errors triggered by the application running on top of QUIC -// * TransportError: for errors triggered by the QUIC transport (in many cases a misbehaving peer) -// * IdleTimeoutError: when the peer goes away unexpectedly (this is a net.Error timeout error) -// * HandshakeTimeoutError: when the cryptographic handshake takes too long (this is a net.Error timeout error) -// * StatelessResetError: when we receive a stateless reset (this is a net.Error temporary error) -// * VersionNegotiationError: returned by the client, when there's no version overlap between the peers +// - [ApplicationError]: for errors triggered by the application running on top of QUIC +// - [TransportError]: for errors triggered by the QUIC transport (in many cases a misbehaving peer) +// - [IdleTimeoutError]: when the peer goes away unexpectedly (this is a [net.Error] timeout error) +// - [HandshakeTimeoutError]: when the cryptographic handshake takes too long (this is a [net.Error] timeout error) +// - [StatelessResetError]: when we receive a stateless reset +// - [VersionNegotiationError]: returned by the client, when there's no version overlap between the peers type Connection interface { // AcceptStream returns the next stream opened by the peer, blocking until one is available. 
- // If the connection was closed due to a timeout, the error satisfies - // the net.Error interface, and Timeout() will be true. AcceptStream(context.Context) (Stream, error) // AcceptUniStream returns the next unidirectional stream opened by the peer, blocking until one is available. - // If the connection was closed due to a timeout, the error satisfies - // the net.Error interface, and Timeout() will be true. AcceptUniStream(context.Context) (ReceiveStream, error) // OpenStream opens a new bidirectional QUIC stream. // There is no signaling to the peer about new streams: - // The peer can only accept the stream after data has been sent on the stream. - // If the error is non-nil, it satisfies the net.Error interface. - // When reaching the peer's stream limit, err.Temporary() will be true. - // If the connection was closed due to a timeout, Timeout() will be true. + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. + // When reaching the peer's stream limit, it is not possible to open a new stream until the + // peer raises the stream limit. In that case, a StreamLimitReachedError is returned. OpenStream() (Stream, error) // OpenStreamSync opens a new bidirectional QUIC stream. // It blocks until a new stream can be opened. // There is no signaling to the peer about new streams: // The peer can only accept the stream after data has been sent on the stream, // or the stream has been reset or closed. - // If the error is non-nil, it satisfies the net.Error interface. - // If the connection was closed due to a timeout, Timeout() will be true. OpenStreamSync(context.Context) (Stream, error) // OpenUniStream opens a new outgoing unidirectional QUIC stream. - // If the error is non-nil, it satisfies the net.Error interface. - // When reaching the peer's stream limit, Temporary() will be true. - // If the connection was closed due to a timeout, Timeout() will be true. + // There is no signaling to the peer about new streams: + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. + // When reaching the peer's stream limit, it is not possible to open a new stream until the + // peer raises the stream limit. In that case, a StreamLimitReachedError is returned. OpenUniStream() (SendStream, error) // OpenUniStreamSync opens a new outgoing unidirectional QUIC stream. // It blocks until a new stream can be opened. - // If the error is non-nil, it satisfies the net.Error interface. - // If the connection was closed due to a timeout, Timeout() will be true. + // There is no signaling to the peer about new streams: + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. OpenUniStreamSync(context.Context) (SendStream, error) // LocalAddr returns the local address. LocalAddr() net.Addr @@ -209,6 +196,8 @@ type Connection interface { SendDatagram(payload []byte) error // ReceiveDatagram gets a message received in a datagram, as specified in RFC 9221. ReceiveDatagram(context.Context) ([]byte, error) + + AddPath(*Transport) (*Path, error) } // An EarlyConnection is a connection that is handshaking. @@ -238,27 +227,22 @@ type TokenGeneratorKey = handshake.TokenProtectorKey // as they are allowed by RFC 8999. type ConnectionID = protocol.ConnectionID -// ConnectionIDFromBytes interprets b as a Connection ID. It panics if b is +// ConnectionIDFromBytes interprets b as a [ConnectionID]. 
It panics if b is // longer than 20 bytes. func ConnectionIDFromBytes(b []byte) ConnectionID { return protocol.ParseConnectionID(b) } -// A ConnectionIDGenerator is an interface that allows clients to implement their own format -// for the Connection IDs that servers/clients use as SrcConnectionID in QUIC packets. -// -// Connection IDs generated by an implementation should always produce IDs of constant size. +// A ConnectionIDGenerator allows the application to take control over the generation of Connection IDs. +// Connection IDs generated by an implementation must be of constant length. type ConnectionIDGenerator interface { - // GenerateConnectionID generates a new ConnectionID. - // Generated ConnectionIDs should be unique and observers should not be able to correlate two ConnectionIDs. + // GenerateConnectionID generates a new Connection ID. + // Generated Connection IDs must be unique and observers should not be able to correlate two Connection IDs. GenerateConnectionID() (ConnectionID, error) - // ConnectionIDLen tells what is the length of the ConnectionIDs generated by the implementation of - // this interface. - // Effectively, this means that implementations of ConnectionIDGenerator must always return constant-size - // connection IDs. Valid lengths are between 0 and 20 and calls to GenerateConnectionID. - // 0-length ConnectionsIDs can be used when an endpoint (server or client) does not require multiplexing connections - // in the presence of a connection migration environment. + // ConnectionIDLen returns the length of Connection IDs generated by this implementation. + // Implementations must return constant-length Connection IDs with lengths between 0 and 20 bytes. + // A length of 0 can only be used when an endpoint doesn't need to multiplex connections during migration. ConnectionIDLen() int } @@ -266,7 +250,7 @@ type ConnectionIDGenerator interface { type Config struct { // GetConfigForClient is called for incoming connections. // If the error is not nil, the connection attempt is refused. - GetConfigForClient func(info *ClientHelloInfo) (*Config, error) + GetConfigForClient func(info *ClientInfo) (*Config, error) // The QUIC versions that can be negotiated. // If not set, it uses all versions available. Versions []Version @@ -327,10 +311,10 @@ type Config struct { // If set to 0, then no keep alive is sent. Otherwise, the keep alive is sent on that period (or at most // every half of MaxIdleTimeout, whichever is smaller). KeepAlivePeriod time.Duration - // InitialPacketSize is the initial size of packets sent. - // It is usually not necessary to manually set this value, - // since Path MTU discovery very quickly finds the path's MTU. - // If set too high, the path might not support packets that large, leading to a timeout of the QUIC handshake. + // InitialPacketSize is the initial size (and the lower limit) for packets sent. + // Under most circumstances, it is not necessary to manually set this value, + // since path MTU discovery quickly finds the path's MTU. + // If set too high, the path might not support packets of that size, leading to a timeout of the QUIC handshake. // Values below 1200 are invalid. InitialPacketSize uint16 // DisablePathMTUDiscovery disables Path MTU Discovery (RFC 8899). @@ -346,7 +330,12 @@ type Config struct { } // ClientHelloInfo contains information about an incoming connection attempt. -type ClientHelloInfo struct { +// +// Deprecated: Use ClientInfo instead. 
+type ClientHelloInfo = ClientInfo + +// ClientInfo contains information about an incoming connection attempt. +type ClientInfo struct { // RemoteAddr is the remote address on the Initial packet. // Unless AddrVerified is set, the address is not yet verified, and could be a spoofed IP address. RemoteAddr net.Addr @@ -356,19 +345,19 @@ type ClientHelloInfo struct { AddrVerified bool } -// ConnectionState records basic details about a QUIC connection +// ConnectionState records basic details about a QUIC connection. type ConnectionState struct { // TLS contains information about the TLS connection state, incl. the tls.ConnectionState. TLS tls.ConnectionState - // SupportsDatagrams says if support for QUIC datagrams (RFC 9221) was negotiated. - // This requires both nodes to support and enable the datagram extensions (via Config.EnableDatagrams). - // If datagram support was negotiated, datagrams can be sent and received using the - // SendDatagram and ReceiveDatagram methods on the Connection. + // SupportsDatagrams indicates whether the peer advertised support for QUIC datagrams (RFC 9221). + // When true, datagrams can be sent using the Connection's SendDatagram method. + // This is a unilateral declaration by the peer - receiving datagrams is only possible if + // datagram support was enabled locally via Config.EnableDatagrams. SupportsDatagrams bool // Used0RTT says if 0-RTT resumption was used. Used0RTT bool // Version is the QUIC version of the QUIC connection. Version Version - // GSO says if generic segmentation offload is used + // GSO says if generic segmentation offload is used. GSO bool } diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go index ba8cbbda..5fcce44d 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go @@ -10,14 +10,13 @@ import ( // SentPacketHandler handles ACKs received for outgoing packets type SentPacketHandler interface { // SentPacket may modify the packet - SentPacket(t time.Time, pn, largestAcked protocol.PacketNumber, streamFrames []StreamFrame, frames []Frame, encLevel protocol.EncryptionLevel, ecn protocol.ECN, size protocol.ByteCount, isPathMTUProbePacket bool) + SentPacket(t time.Time, pn, largestAcked protocol.PacketNumber, streamFrames []StreamFrame, frames []Frame, encLevel protocol.EncryptionLevel, ecn protocol.ECN, size protocol.ByteCount, isPathMTUProbePacket, isPathProbePacket bool) // ReceivedAck processes an ACK frame. // It does not store a copy of the frame. ReceivedAck(f *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) (bool /* 1-RTT packet acked */, error) - ReceivedBytes(protocol.ByteCount) - DropPackets(protocol.EncryptionLevel) - ResetForRetry(rcvTime time.Time) error - SetHandshakeConfirmed() + ReceivedBytes(_ protocol.ByteCount, rcvTime time.Time) + DropPackets(_ protocol.EncryptionLevel, rcvTime time.Time) + ResetForRetry(rcvTime time.Time) // The SendMode determines if and what kind of packets can be sent. 
SendMode(now time.Time) SendMode @@ -34,12 +33,14 @@ type SentPacketHandler interface { PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber GetLossDetectionTimeout() time.Time - OnLossDetectionTimeout() error + OnLossDetectionTimeout(now time.Time) error + + MigratedPath(now time.Time, initialMaxPacketSize protocol.ByteCount) } type sentPacketTracker interface { GetLowestPacketNotConfirmedAcked() protocol.PacketNumber - ReceivedPacket(protocol.EncryptionLevel) + ReceivedPacket(_ protocol.EncryptionLevel, rcvTime time.Time) } // ReceivedPacketHandler handles ACKs needed to send for incoming packets @@ -49,5 +50,5 @@ type ReceivedPacketHandler interface { DropPackets(protocol.EncryptionLevel) GetAlarmTimeout() time.Time - GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame + GetAckFrame(_ protocol.EncryptionLevel, now time.Time, onlyIfQueued bool) *wire.AckFrame } diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go index 5f43689b..c634939a 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go @@ -22,10 +22,11 @@ type packet struct { includedInBytesInFlight bool declaredLost bool skippedPacket bool + isPathProbePacket bool } func (p *packet) outstanding() bool { - return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket + return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket && !p.isPathProbePacket } var packetPool = sync.Pool{New: func() any { return &packet{} }} diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go index 1175c790..eda0826c 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go @@ -38,7 +38,7 @@ func (h *receivedPacketHandler) ReceivedPacket( rcvTime time.Time, ackEliciting bool, ) error { - h.sentPackets.ReceivedPacket(encLevel) + h.sentPackets.ReceivedPacket(encLevel, rcvTime) switch encLevel { case protocol.EncryptionInitial: return h.initialPackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting) @@ -87,7 +87,7 @@ func (h *receivedPacketHandler) GetAlarmTimeout() time.Time { return h.appDataPackets.GetAlarmTimeout() } -func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame { +func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, now time.Time, onlyIfQueued bool) *wire.AckFrame { //nolint:exhaustive // 0-RTT packets can't contain ACK frames. 
switch encLevel { case protocol.EncryptionInitial: @@ -101,7 +101,7 @@ func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, o } return nil case protocol.Encryption1RTT: - return h.appDataPackets.GetAckFrame(onlyIfQueued) + return h.appDataPackets.GetAckFrame(now, onlyIfQueued) default: // 0-RTT packets can't contain ACK frames return nil diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go index 3143bfe1..f9feae1d 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go @@ -1,10 +1,9 @@ package ackhandler import ( - "sync" + "slices" "github.com/quic-go/quic-go/internal/protocol" - list "github.com/quic-go/quic-go/internal/utils/linkedlist" "github.com/quic-go/quic-go/internal/wire" ) @@ -14,25 +13,17 @@ type interval struct { End protocol.PacketNumber } -var intervalElementPool sync.Pool - -func init() { - intervalElementPool = *list.NewPool[interval]() -} - // The receivedPacketHistory stores if a packet number has already been received. // It generates ACK ranges which can be used to assemble an ACK frame. // It does not store packet contents. type receivedPacketHistory struct { - ranges *list.List[interval] + ranges []interval // maximum length: protocol.MaxNumAckRanges deletedBelow protocol.PacketNumber } func newReceivedPacketHistory() *receivedPacketHistory { - return &receivedPacketHistory{ - ranges: list.NewWithPool[interval](&intervalElementPool), - } + return &receivedPacketHistory{} } // ReceivedPacket registers a packet with PacketNumber p and updates the ranges @@ -41,58 +32,54 @@ func (h *receivedPacketHistory) ReceivedPacket(p protocol.PacketNumber) bool /* if p < h.deletedBelow { return false } + isNew := h.addToRanges(p) - h.maybeDeleteOldRanges() + // Delete old ranges, if we're tracking too many of them. + // This is a DoS defense against a peer that sends us too many gaps. + if len(h.ranges) > protocol.MaxNumAckRanges { + h.ranges = slices.Delete(h.ranges, 0, len(h.ranges)-protocol.MaxNumAckRanges) + } return isNew } func (h *receivedPacketHistory) addToRanges(p protocol.PacketNumber) bool /* is a new packet (and not a duplicate / delayed packet) */ { - if h.ranges.Len() == 0 { - h.ranges.PushBack(interval{Start: p, End: p}) + if len(h.ranges) == 0 { + h.ranges = append(h.ranges, interval{Start: p, End: p}) return true } - for el := h.ranges.Back(); el != nil; el = el.Prev() { + for i := len(h.ranges) - 1; i >= 0; i-- { // p already included in an existing range. 
Nothing to do here - if p >= el.Value.Start && p <= el.Value.End { + if p >= h.ranges[i].Start && p <= h.ranges[i].End { return false } - if el.Value.End == p-1 { // extend a range at the end - el.Value.End = p + if h.ranges[i].End == p-1 { // extend a range at the end + h.ranges[i].End = p return true } - if el.Value.Start == p+1 { // extend a range at the beginning - el.Value.Start = p + if h.ranges[i].Start == p+1 { // extend a range at the beginning + h.ranges[i].Start = p - prev := el.Prev() - if prev != nil && prev.Value.End+1 == el.Value.Start { // merge two ranges - prev.Value.End = el.Value.End - h.ranges.Remove(el) + if i > 0 && h.ranges[i-1].End+1 == h.ranges[i].Start { // merge two ranges + h.ranges[i-1].End = h.ranges[i].End + h.ranges = slices.Delete(h.ranges, i, i+1) } return true } - // create a new range at the end - if p > el.Value.End { - h.ranges.InsertAfter(interval{Start: p, End: p}, el) + // create a new range after the current one + if p > h.ranges[i].End { + h.ranges = slices.Insert(h.ranges, i+1, interval{Start: p, End: p}) return true } } // create a new range at the beginning - h.ranges.InsertBefore(interval{Start: p, End: p}, h.ranges.Front()) + h.ranges = slices.Insert(h.ranges, 0, interval{Start: p, End: p}) return true } -// Delete old ranges, if we're tracking more than 500 of them. -// This is a DoS defense against a peer that sends us too many gaps. -func (h *receivedPacketHistory) maybeDeleteOldRanges() { - for h.ranges.Len() > protocol.MaxNumAckRanges { - h.ranges.Remove(h.ranges.Front()) - } -} - // DeleteBelow deletes all entries below (but not including) p func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) { if p < h.deletedBelow { @@ -100,37 +87,39 @@ func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) { } h.deletedBelow = p - nextEl := h.ranges.Front() - for el := h.ranges.Front(); nextEl != nil; el = nextEl { - nextEl = el.Next() + if len(h.ranges) == 0 { + return + } - if el.Value.End < p { // delete a whole range - h.ranges.Remove(el) - } else if p > el.Value.Start && p <= el.Value.End { - el.Value.Start = p - return + idx := -1 + for i := 0; i < len(h.ranges); i++ { + if h.ranges[i].End < p { // delete a whole range + idx = i + } else if p > h.ranges[i].Start && p <= h.ranges[i].End { + h.ranges[i].Start = p + break } else { // no ranges affected. 
Nothing to do - return + break } } + if idx >= 0 { + h.ranges = slices.Delete(h.ranges, 0, idx+1) + } } // AppendAckRanges appends to a slice of all AckRanges that can be used in an AckFrame func (h *receivedPacketHistory) AppendAckRanges(ackRanges []wire.AckRange) []wire.AckRange { - if h.ranges.Len() > 0 { - for el := h.ranges.Back(); el != nil; el = el.Prev() { - ackRanges = append(ackRanges, wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End}) - } + for i := len(h.ranges) - 1; i >= 0; i-- { + ackRanges = append(ackRanges, wire.AckRange{Smallest: h.ranges[i].Start, Largest: h.ranges[i].End}) } return ackRanges } func (h *receivedPacketHistory) GetHighestAckRange() wire.AckRange { ackRange := wire.AckRange{} - if h.ranges.Len() > 0 { - r := h.ranges.Back().Value - ackRange.Smallest = r.Start - ackRange.Largest = r.End + if len(h.ranges) > 0 { + ackRange.Smallest = h.ranges[len(h.ranges)-1].Start + ackRange.Largest = h.ranges[len(h.ranges)-1].End } return ackRange } @@ -139,11 +128,12 @@ func (h *receivedPacketHistory) IsPotentiallyDuplicate(p protocol.PacketNumber) if p < h.deletedBelow { return true } - for el := h.ranges.Back(); el != nil; el = el.Prev() { - if p > el.Value.End { + // Iterating over the slices is faster than using a binary search (using slices.BinarySearchFunc). + for i := len(h.ranges) - 1; i >= 0; i-- { + if p > h.ranges[i].End { return false } - if p <= el.Value.End && p >= el.Value.Start { + if p <= h.ranges[i].End && p >= h.ranges[i].Start { return true } } diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go index 08af6f1e..d1d26f4a 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go @@ -196,8 +196,7 @@ func (h *appDataReceivedPacketTracker) shouldQueueACK(pn protocol.PacketNumber, return false } -func (h *appDataReceivedPacketTracker) GetAckFrame(onlyIfQueued bool) *wire.AckFrame { - now := time.Now() +func (h *appDataReceivedPacketTracker) GetAckFrame(now time.Time, onlyIfQueued bool) *wire.AckFrame { if onlyIfQueued && !h.ackQueued { if h.ackAlarm.IsZero() || h.ackAlarm.After(now) { return nil diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go index 3cef8923..a53be7d7 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go @@ -27,8 +27,11 @@ const ( maxPTODuration = 60 * time.Second ) +// Path probe packets are declared lost after this time. 
+const pathProbePacketLossTimeout = time.Second + type packetNumberSpace struct { - history *sentPacketHistory + history sentPacketHistory pns packetNumberGenerator lossTime time.Time @@ -38,21 +41,27 @@ type packetNumberSpace struct { largestSent protocol.PacketNumber } -func newPacketNumberSpace(initialPN protocol.PacketNumber, skipPNs bool) *packetNumberSpace { +func newPacketNumberSpace(initialPN protocol.PacketNumber, isAppData bool) *packetNumberSpace { var pns packetNumberGenerator - if skipPNs { + if isAppData { pns = newSkippingPacketNumberGenerator(initialPN, protocol.SkipPacketInitialPeriod, protocol.SkipPacketMaxPeriod) } else { pns = newSequentialPacketNumberGenerator(initialPN) } return &packetNumberSpace{ - history: newSentPacketHistory(), + history: *newSentPacketHistory(isAppData), pns: pns, largestSent: protocol.InvalidPacketNumber, largestAcked: protocol.InvalidPacketNumber, } } +type alarmTimer struct { + Time time.Time + TimerType logging.TimerType + EncryptionLevel protocol.EncryptionLevel +} + type sentPacketHandler struct { initialPackets *packetNumberSpace handshakePackets *packetNumberSpace @@ -90,7 +99,7 @@ type sentPacketHandler struct { numProbesToSend int // The alarm timeout - alarm time.Time + alarm alarmTimer enableECN bool ecnTracker ecnHandler @@ -155,7 +164,7 @@ func (h *sentPacketHandler) removeFromBytesInFlight(p *packet) { } } -func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { +func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel, now time.Time) { // The server won't await address validation after the handshake is confirmed. // This applies even if we didn't receive an ACK for a Handshake packet. if h.perspective == protocol.PerspectiveClient && encLevel == protocol.EncryptionHandshake { @@ -168,10 +177,9 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { if pnSpace == nil { return } - pnSpace.history.Iterate(func(p *packet) (bool, error) { + for p := range pnSpace.history.Packets() { h.removeFromBytesInFlight(p) - return true, nil - }) + } } // drop the packet history //nolint:exhaustive // Not every packet number space can be dropped. @@ -179,20 +187,22 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { case protocol.EncryptionInitial: h.initialPackets = nil case protocol.EncryptionHandshake: + // Dropping the handshake packet number space means that the handshake is confirmed, + // see section 4.9.2 of RFC 9001. + h.handshakeConfirmed = true h.handshakePackets = nil case protocol.Encryption0RTT: // This function is only called when 0-RTT is rejected, // and not when the client drops 0-RTT keys when the handshake completes. // When 0-RTT is rejected, all application data sent so far becomes invalid. // Delete the packets from the history and remove them from bytes_in_flight. 
- h.appDataPackets.history.Iterate(func(p *packet) (bool, error) { + for p := range h.appDataPackets.history.Packets() { if p.EncryptionLevel != protocol.Encryption0RTT && !p.skippedPacket { - return false, nil + break } h.removeFromBytesInFlight(p) h.appDataPackets.history.Remove(p.PacketNumber) - return true, nil - }) + } default: panic(fmt.Sprintf("Cannot drop keys for encryption level %s", encLevel)) } @@ -202,21 +212,21 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { h.ptoCount = 0 h.numProbesToSend = 0 h.ptoMode = SendNone - h.setLossDetectionTimer() + h.setLossDetectionTimer(now) } -func (h *sentPacketHandler) ReceivedBytes(n protocol.ByteCount) { +func (h *sentPacketHandler) ReceivedBytes(n protocol.ByteCount, t time.Time) { wasAmplificationLimit := h.isAmplificationLimited() h.bytesReceived += n if wasAmplificationLimit && !h.isAmplificationLimited() { - h.setLossDetectionTimer() + h.setLossDetectionTimer(t) } } -func (h *sentPacketHandler) ReceivedPacket(l protocol.EncryptionLevel) { +func (h *sentPacketHandler) ReceivedPacket(l protocol.EncryptionLevel, t time.Time) { if h.perspective == protocol.PerspectiveServer && l == protocol.EncryptionHandshake && !h.peerAddressValidated { h.peerAddressValidated = true - h.setLossDetectionTimer() + h.setLossDetectionTimer(t) } } @@ -240,11 +250,12 @@ func (h *sentPacketHandler) SentPacket( ecn protocol.ECN, size protocol.ByteCount, isPathMTUProbePacket bool, + isPathProbePacket bool, ) { h.bytesSent += size pnSpace := h.getPacketNumberSpace(encLevel) - if h.logger.Debug() && pnSpace.history.HasOutstandingPackets() { + if h.logger.Debug() && (pnSpace.history.HasOutstandingPackets() || pnSpace.history.HasOutstandingPathProbes()) { for p := max(0, pnSpace.largestSent+1); p < pn; p++ { h.logger.Debugf("Skipping packet number %d", p) } @@ -253,6 +264,18 @@ func (h *sentPacketHandler) SentPacket( pnSpace.largestSent = pn isAckEliciting := len(streamFrames) > 0 || len(frames) > 0 + if isPathProbePacket { + p := getPacket() + p.SendTime = t + p.PacketNumber = pn + p.EncryptionLevel = encLevel + p.Length = size + p.Frames = frames + p.isPathProbePacket = true + pnSpace.history.SentPathProbePacket(p) + h.setLossDetectionTimer(t) + return + } if isAckEliciting { pnSpace.lastAckElicitingPacketTime = t h.bytesInFlight += size @@ -269,7 +292,7 @@ func (h *sentPacketHandler) SentPacket( if !isAckEliciting { pnSpace.history.SentNonAckElicitingPacket(pn) if !h.peerCompletedAddressValidation { - h.setLossDetectionTimer() + h.setLossDetectionTimer(t) } return } @@ -289,7 +312,7 @@ func (h *sentPacketHandler) SentPacket( if h.tracer != nil && h.tracer.UpdatedMetrics != nil { h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight()) } - h.setLossDetectionTimer() + h.setLossDetectionTimer(t) } func (h *sentPacketHandler) getPacketNumberSpace(encLevel protocol.EncryptionLevel) *packetNumberSpace { @@ -322,7 +345,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En h.peerCompletedAddressValidation = true h.logger.Debugf("Peer doesn't await address validation any longer.") // Make sure that the timer is reset, even if this ACK doesn't acknowledge any (ack-eliciting) packets. 
- h.setLossDetectionTimer() + h.setLossDetectionTimer(rcvTime) } priorInFlight := h.bytesInFlight @@ -332,13 +355,13 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En } // update the RTT, if the largest acked is newly acknowledged if len(ackedPackets) > 0 { - if p := ackedPackets[len(ackedPackets)-1]; p.PacketNumber == ack.LargestAcked() { + if p := ackedPackets[len(ackedPackets)-1]; p.PacketNumber == ack.LargestAcked() && !p.isPathProbePacket { // don't use the ack delay for Initial and Handshake packets var ackDelay time.Duration if encLevel == protocol.Encryption1RTT { ackDelay = min(ack.DelayTime, h.rttStats.MaxAckDelay()) } - h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay, rcvTime) + h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay) if h.logger.Debug() { h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation()) } @@ -356,8 +379,9 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En pnSpace.largestAcked = max(pnSpace.largestAcked, largestAcked) - if err := h.detectLostPackets(rcvTime, encLevel); err != nil { - return false, err + h.detectLostPackets(rcvTime, encLevel) + if encLevel == protocol.Encryption1RTT { + h.detectLostPathProbes(rcvTime) } var acked1RTTPacket bool for _, p := range ackedPackets { @@ -368,7 +392,9 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En acked1RTTPacket = true } h.removeFromBytesInFlight(p) - putPacket(p) + if !p.isPathProbePacket { + putPacket(p) + } } // After this point, we must not use ackedPackets any longer! // We've already returned the buffers. @@ -387,7 +413,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight()) } - h.setLossDetectionTimer() + h.setLossDetectionTimer(rcvTime) return acked1RTTPacket, nil } @@ -402,14 +428,13 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL ackRangeIndex := 0 lowestAcked := ack.LowestAcked() largestAcked := ack.LargestAcked() - err := pnSpace.history.Iterate(func(p *packet) (bool, error) { - // Ignore packets below the lowest acked + for p := range pnSpace.history.Packets() { + // ignore packets below the lowest acked if p.PacketNumber < lowestAcked { - return true, nil + continue } - // Break after largest acked is reached if p.PacketNumber > largestAcked { - return false, nil + break } if ack.HasMissingRanges() { @@ -421,21 +446,28 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL } if p.PacketNumber < ackRange.Smallest { // packet not contained in ACK range - return true, nil + continue } if p.PacketNumber > ackRange.Largest { - return false, fmt.Errorf("BUG: ackhandler would have acked wrong packet %d, while evaluating range %d -> %d", p.PacketNumber, ackRange.Smallest, ackRange.Largest) + return nil, fmt.Errorf("BUG: ackhandler would have acked wrong packet %d, while evaluating range %d -> %d", p.PacketNumber, ackRange.Smallest, ackRange.Largest) } } if p.skippedPacket { - return false, &qerr.TransportError{ + return nil, &qerr.TransportError{ ErrorCode: qerr.ProtocolViolation, ErrorMessage: fmt.Sprintf("received an ACK for skipped packet number: %d (%s)", p.PacketNumber, encLevel), } } + if p.isPathProbePacket { + probePacket := pnSpace.history.RemovePathProbe(p.PacketNumber) + // the probe packet might already have been declared lost + if probePacket 
!= nil { + h.ackedPackets = append(h.ackedPackets, probePacket) + } + continue + } h.ackedPackets = append(h.ackedPackets, p) - return true, nil - }) + } if h.logger.Debug() && len(h.ackedPackets) > 0 { pns := make([]protocol.PacketNumber, len(h.ackedPackets)) for i, p := range h.ackedPackets { @@ -466,8 +498,7 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL h.tracer.AcknowledgedPacket(encLevel, p.PacketNumber) } } - - return h.ackedPackets, err + return h.ackedPackets, nil } func (h *sentPacketHandler) getLossTimeAndSpace() (time.Time, protocol.EncryptionLevel) { @@ -498,41 +529,44 @@ func (h *sentPacketHandler) getScaledPTO(includeMaxAckDelay bool) time.Duration } // same logic as getLossTimeAndSpace, but for lastAckElicitingPacketTime instead of lossTime -func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) { +func (h *sentPacketHandler) getPTOTimeAndSpace(now time.Time) (pto time.Time, encLevel protocol.EncryptionLevel) { // We only send application data probe packets once the handshake is confirmed, // because before that, we don't have the keys to decrypt ACKs sent in 1-RTT packets. if !h.handshakeConfirmed && !h.hasOutstandingCryptoPackets() { if h.peerCompletedAddressValidation { return } - t := time.Now().Add(h.getScaledPTO(false)) + t := now.Add(h.getScaledPTO(false)) if h.initialPackets != nil { - return t, protocol.EncryptionInitial, true + return t, protocol.EncryptionInitial } - return t, protocol.EncryptionHandshake, true + return t, protocol.EncryptionHandshake } - if h.initialPackets != nil { + if h.initialPackets != nil && h.initialPackets.history.HasOutstandingPackets() && + !h.initialPackets.lastAckElicitingPacketTime.IsZero() { encLevel = protocol.EncryptionInitial if t := h.initialPackets.lastAckElicitingPacketTime; !t.IsZero() { pto = t.Add(h.getScaledPTO(false)) } } - if h.handshakePackets != nil && !h.handshakePackets.lastAckElicitingPacketTime.IsZero() { + if h.handshakePackets != nil && h.handshakePackets.history.HasOutstandingPackets() && + !h.handshakePackets.lastAckElicitingPacketTime.IsZero() { t := h.handshakePackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(false)) if pto.IsZero() || (!t.IsZero() && t.Before(pto)) { pto = t encLevel = protocol.EncryptionHandshake } } - if h.handshakeConfirmed && !h.appDataPackets.lastAckElicitingPacketTime.IsZero() { + if h.handshakeConfirmed && h.appDataPackets.history.HasOutstandingPackets() && + !h.appDataPackets.lastAckElicitingPacketTime.IsZero() { t := h.appDataPackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(true)) if pto.IsZero() || (!t.IsZero() && t.Before(pto)) { pto = t encLevel = protocol.Encryption1RTT } } - return pto, encLevel, true + return pto, encLevel } func (h *sentPacketHandler) hasOutstandingCryptoPackets() bool { @@ -545,65 +579,91 @@ func (h *sentPacketHandler) hasOutstandingCryptoPackets() bool { return false } -func (h *sentPacketHandler) hasOutstandingPackets() bool { - return h.appDataPackets.history.HasOutstandingPackets() || h.hasOutstandingCryptoPackets() -} - -func (h *sentPacketHandler) setLossDetectionTimer() { +func (h *sentPacketHandler) setLossDetectionTimer(now time.Time) { oldAlarm := h.alarm // only needed in case tracing is enabled - lossTime, encLevel := h.getLossTimeAndSpace() - if !lossTime.IsZero() { - // Early retransmit timer or time loss detection. 
- h.alarm = lossTime - if h.tracer != nil && h.tracer.SetLossTimer != nil && h.alarm != oldAlarm { - h.tracer.SetLossTimer(logging.TimerTypeACK, encLevel, h.alarm) + newAlarm := h.lossDetectionTime(now) + h.alarm = newAlarm + + hasAlarm := !newAlarm.Time.IsZero() + if !hasAlarm && !oldAlarm.Time.IsZero() { + h.logger.Debugf("Canceling loss detection timer.") + if h.tracer != nil && h.tracer.LossTimerCanceled != nil { + h.tracer.LossTimerCanceled() } - return } - // Cancel the alarm if amplification limited. - if h.isAmplificationLimited() { - h.alarm = time.Time{} - if !oldAlarm.IsZero() { - h.logger.Debugf("Canceling loss detection timer. Amplification limited.") - if h.tracer != nil && h.tracer.LossTimerCanceled != nil { - h.tracer.LossTimerCanceled() - } - } - return - } - - // Cancel the alarm if no packets are outstanding - if !h.hasOutstandingPackets() && h.peerCompletedAddressValidation { - h.alarm = time.Time{} - if !oldAlarm.IsZero() { - h.logger.Debugf("Canceling loss detection timer. No packets in flight.") - if h.tracer != nil && h.tracer.LossTimerCanceled != nil { - h.tracer.LossTimerCanceled() - } - } - return - } - - // PTO alarm - ptoTime, encLevel, ok := h.getPTOTimeAndSpace() - if !ok { - if !oldAlarm.IsZero() { - h.alarm = time.Time{} - h.logger.Debugf("Canceling loss detection timer. No PTO needed..") - if h.tracer != nil && h.tracer.LossTimerCanceled != nil { - h.tracer.LossTimerCanceled() - } - } - return - } - h.alarm = ptoTime - if h.tracer != nil && h.tracer.SetLossTimer != nil && h.alarm != oldAlarm { - h.tracer.SetLossTimer(logging.TimerTypePTO, encLevel, h.alarm) + if hasAlarm && h.tracer != nil && h.tracer.SetLossTimer != nil && newAlarm != oldAlarm { + h.tracer.SetLossTimer(newAlarm.TimerType, newAlarm.EncryptionLevel, newAlarm.Time) } } -func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.EncryptionLevel) error { +func (h *sentPacketHandler) lossDetectionTime(now time.Time) alarmTimer { + // cancel the alarm if no packets are outstanding + if h.peerCompletedAddressValidation && !h.hasOutstandingCryptoPackets() && + !h.appDataPackets.history.HasOutstandingPackets() && !h.appDataPackets.history.HasOutstandingPathProbes() { + return alarmTimer{} + } + + // cancel the alarm if amplification limited + if h.isAmplificationLimited() { + return alarmTimer{} + } + + var pathProbeLossTime time.Time + if h.appDataPackets.history.HasOutstandingPathProbes() { + if p := h.appDataPackets.history.FirstOutstandingPathProbe(); p != nil { + pathProbeLossTime = p.SendTime.Add(pathProbePacketLossTimeout) + } + } + + // early retransmit timer or time loss detection + lossTime, encLevel := h.getLossTimeAndSpace() + if !lossTime.IsZero() && (pathProbeLossTime.IsZero() || lossTime.Before(pathProbeLossTime)) { + return alarmTimer{ + Time: lossTime, + TimerType: logging.TimerTypeACK, + EncryptionLevel: encLevel, + } + } + ptoTime, encLevel := h.getPTOTimeAndSpace(now) + if !ptoTime.IsZero() && (pathProbeLossTime.IsZero() || ptoTime.Before(pathProbeLossTime)) { + return alarmTimer{ + Time: ptoTime, + TimerType: logging.TimerTypePTO, + EncryptionLevel: encLevel, + } + } + if !pathProbeLossTime.IsZero() { + return alarmTimer{ + Time: pathProbeLossTime, + TimerType: logging.TimerTypePathProbe, + EncryptionLevel: protocol.Encryption1RTT, + } + } + return alarmTimer{} +} + +func (h *sentPacketHandler) detectLostPathProbes(now time.Time) { + if !h.appDataPackets.history.HasOutstandingPathProbes() { + return + } + lossTime := now.Add(-pathProbePacketLossTimeout) + // 
RemovePathProbe cannot be called while iterating. + var lostPathProbes []*packet + for p := range h.appDataPackets.history.PathProbes() { + if !p.SendTime.After(lossTime) { + lostPathProbes = append(lostPathProbes, p) + } + } + for _, p := range lostPathProbes { + for _, f := range p.Frames { + f.Handler.OnLost(f.Frame) + } + h.appDataPackets.history.RemovePathProbe(p.PacketNumber) + } +} + +func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.EncryptionLevel) { pnSpace := h.getPacketNumberSpace(encLevel) pnSpace.lossTime = time.Time{} @@ -617,15 +677,16 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E lostSendTime := now.Add(-lossDelay) priorInFlight := h.bytesInFlight - return pnSpace.history.Iterate(func(p *packet) (bool, error) { + for p := range pnSpace.history.Packets() { if p.PacketNumber > pnSpace.largestAcked { - return false, nil + break } + isRegularPacket := !p.skippedPacket && !p.isPathProbePacket var packetLost bool - if p.SendTime.Before(lostSendTime) { + if !p.SendTime.After(lostSendTime) { packetLost = true - if !p.skippedPacket { + if isRegularPacket { if h.logger.Debug() { h.logger.Debugf("\tlost packet %d (time threshold)", p.PacketNumber) } @@ -635,7 +696,7 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E } } else if pnSpace.largestAcked >= p.PacketNumber+packetThreshold { packetLost = true - if !p.skippedPacket { + if isRegularPacket { if h.logger.Debug() { h.logger.Debugf("\tlost packet %d (reordering threshold)", p.PacketNumber) } @@ -653,7 +714,7 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E } if packetLost { pnSpace.history.DeclareLost(p.PacketNumber) - if !p.skippedPacket { + if isRegularPacket { // the bytes in flight need to be reduced no matter if the frames in this packet will be retransmitted h.removeFromBytesInFlight(p) h.queueFramesForRetransmission(p) @@ -665,12 +726,16 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E } } } - return true, nil - }) + } } -func (h *sentPacketHandler) OnLossDetectionTimeout() error { - defer h.setLossDetectionTimer() +func (h *sentPacketHandler) OnLossDetectionTimeout(now time.Time) error { + defer h.setLossDetectionTimer(now) + + if h.handshakeConfirmed { + h.detectLostPathProbes(now) + } + earliestLossTime, encLevel := h.getLossTimeAndSpace() if !earliestLossTime.IsZero() { if h.logger.Debug() { @@ -680,13 +745,14 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error { h.tracer.LossTimerExpired(logging.TimerTypeACK, encLevel) } // Early retransmit or time loss detection - return h.detectLostPackets(time.Now(), encLevel) + h.detectLostPackets(now, encLevel) + return nil } // PTO - // When all outstanding are acknowledged, the alarm is canceled in - // setLossDetectionTimer. This doesn't reset the timer in the session though. - // When OnAlarm is called, we therefore need to make sure that there are + // When all outstanding are acknowledged, the alarm is canceled in setLossDetectionTimer. + // However, there's no way to reset the timer in the connection. + // When OnLossDetectionTimeout is called, we therefore need to make sure that there are // actually packets outstanding. 
if h.bytesInFlight == 0 && !h.peerCompletedAddressValidation { h.ptoCount++ @@ -701,11 +767,12 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error { return nil } - _, encLevel, ok := h.getPTOTimeAndSpace() - if !ok { + ptoTime, encLevel := h.getPTOTimeAndSpace(now) + if ptoTime.IsZero() { return nil } - if ps := h.getPacketNumberSpace(encLevel); !ps.history.HasOutstandingPackets() && !h.peerCompletedAddressValidation { + ps := h.getPacketNumberSpace(encLevel) + if !ps.history.HasOutstandingPackets() && !ps.history.HasOutstandingPathProbes() && !h.peerCompletedAddressValidation { return nil } h.ptoCount++ @@ -739,7 +806,7 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error { } func (h *sentPacketHandler) GetLossDetectionTimeout() time.Time { - return h.alarm + return h.alarm.Time } func (h *sentPacketHandler) ECNMode(isShortHeaderPacket bool) protocol.ECN { @@ -756,7 +823,7 @@ func (h *sentPacketHandler) PeekPacketNumber(encLevel protocol.EncryptionLevel) pnSpace := h.getPacketNumberSpace(encLevel) pn := pnSpace.pns.Peek() // See section 17.1 of RFC 9000. - return pn, protocol.GetPacketNumberLengthForHeader(pn, pnSpace.largestAcked) + return pn, protocol.PacketNumberLengthForHeader(pn, pnSpace.largestAcked) } func (h *sentPacketHandler) PopPacketNumber(encLevel protocol.EncryptionLevel) protocol.PacketNumber { @@ -864,33 +931,30 @@ func (h *sentPacketHandler) queueFramesForRetransmission(p *packet) { p.Frames = nil } -func (h *sentPacketHandler) ResetForRetry(now time.Time) error { +func (h *sentPacketHandler) ResetForRetry(now time.Time) { h.bytesInFlight = 0 var firstPacketSendTime time.Time - h.initialPackets.history.Iterate(func(p *packet) (bool, error) { + for p := range h.initialPackets.history.Packets() { if firstPacketSendTime.IsZero() { firstPacketSendTime = p.SendTime } - if p.declaredLost || p.skippedPacket { - return true, nil - } - h.queueFramesForRetransmission(p) - return true, nil - }) - // All application data packets sent at this point are 0-RTT packets. - // In the case of a Retry, we can assume that the server dropped all of them. - h.appDataPackets.history.Iterate(func(p *packet) (bool, error) { if !p.declaredLost && !p.skippedPacket { h.queueFramesForRetransmission(p) } - return true, nil - }) + } + // All application data packets sent at this point are 0-RTT packets. + // In the case of a Retry, we can assume that the server dropped all of them. + for p := range h.appDataPackets.history.Packets() { + if !p.declaredLost && !p.skippedPacket { + h.queueFramesForRetransmission(p) + } + } // Only use the Retry to estimate the RTT if we didn't send any retransmission for the Initial. // Otherwise, we don't know which Initial the Retry was sent in response to. if h.ptoCount == 0 { // Don't set the RTT to a value lower than 5ms here. 
- h.rttStats.UpdateRTT(max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0, now) + h.rttStats.UpdateRTT(max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0) if h.logger.Debug() { h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation()) } @@ -901,28 +965,36 @@ func (h *sentPacketHandler) ResetForRetry(now time.Time) error { h.initialPackets = newPacketNumberSpace(h.initialPackets.pns.Peek(), false) h.appDataPackets = newPacketNumberSpace(h.appDataPackets.pns.Peek(), true) oldAlarm := h.alarm - h.alarm = time.Time{} + h.alarm = alarmTimer{} if h.tracer != nil { if h.tracer.UpdatedPTOCount != nil { h.tracer.UpdatedPTOCount(0) } - if !oldAlarm.IsZero() && h.tracer.LossTimerCanceled != nil { + if !oldAlarm.Time.IsZero() && h.tracer.LossTimerCanceled != nil { h.tracer.LossTimerCanceled() } } h.ptoCount = 0 - return nil } -func (h *sentPacketHandler) SetHandshakeConfirmed() { - if h.initialPackets != nil { - panic("didn't drop initial correctly") +func (h *sentPacketHandler) MigratedPath(now time.Time, initialMaxDatagramSize protocol.ByteCount) { + h.rttStats.ResetForPathMigration() + for p := range h.appDataPackets.history.Packets() { + h.appDataPackets.history.DeclareLost(p.PacketNumber) + if !p.skippedPacket && !p.isPathProbePacket { + h.removeFromBytesInFlight(p) + h.queueFramesForRetransmission(p) + } } - if h.handshakePackets != nil { - panic("didn't drop handshake correctly") + for p := range h.appDataPackets.history.PathProbes() { + h.appDataPackets.history.RemovePathProbe(p.PacketNumber) } - h.handshakeConfirmed = true - // We don't send PTOs for application data packets before the handshake completes. - // Make sure the timer is armed now, if necessary. - h.setLossDetectionTimer() + h.congestion = congestion.NewCubicSender( + congestion.DefaultClock{}, + h.rttStats, + initialMaxDatagramSize, + true, // use Reno + h.tracer, + ) + h.setLossDetectionTimer(now) } diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go index c14c0f49..0aabc6d9 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go @@ -2,23 +2,30 @@ package ackhandler import ( "fmt" + "iter" "github.com/quic-go/quic-go/internal/protocol" ) type sentPacketHistory struct { - packets []*packet + packets []*packet + pathProbePackets []*packet numOutstanding int highestPacketNumber protocol.PacketNumber } -func newSentPacketHistory() *sentPacketHistory { - return &sentPacketHistory{ - packets: make([]*packet, 0, 32), +func newSentPacketHistory(isAppData bool) *sentPacketHistory { + h := &sentPacketHistory{ highestPacketNumber: protocol.InvalidPacketNumber, } + if isAppData { + h.packets = make([]*packet, 0, 32) + } else { + h.packets = make([]*packet, 0, 6) + } + return h } func (h *sentPacketHistory) checkSequentialPacketNumberUse(pn protocol.PacketNumber) { @@ -27,11 +34,11 @@ func (h *sentPacketHistory) checkSequentialPacketNumberUse(pn protocol.PacketNum panic("non-sequential packet number use") } } + h.highestPacketNumber = pn } func (h *sentPacketHistory) SkippedPacket(pn protocol.PacketNumber) { h.checkSequentialPacketNumberUse(pn) - h.highestPacketNumber = pn h.packets = append(h.packets, &packet{ PacketNumber: pn, skippedPacket: true, @@ -40,7 +47,6 @@ func (h *sentPacketHistory) SkippedPacket(pn protocol.PacketNumber) { func (h *sentPacketHistory) 
SentNonAckElicitingPacket(pn protocol.PacketNumber) { h.checkSequentialPacketNumberUse(pn) - h.highestPacketNumber = pn if len(h.packets) > 0 { h.packets = append(h.packets, nil) } @@ -48,28 +54,42 @@ func (h *sentPacketHistory) SentNonAckElicitingPacket(pn protocol.PacketNumber) func (h *sentPacketHistory) SentAckElicitingPacket(p *packet) { h.checkSequentialPacketNumberUse(p.PacketNumber) - h.highestPacketNumber = p.PacketNumber h.packets = append(h.packets, p) if p.outstanding() { h.numOutstanding++ } } -// Iterate iterates through all packets. -func (h *sentPacketHistory) Iterate(cb func(*packet) (cont bool, err error)) error { - for _, p := range h.packets { - if p == nil { - continue - } - cont, err := cb(p) - if err != nil { - return err - } - if !cont { - return nil +func (h *sentPacketHistory) SentPathProbePacket(p *packet) { + h.checkSequentialPacketNumberUse(p.PacketNumber) + h.packets = append(h.packets, &packet{ + PacketNumber: p.PacketNumber, + isPathProbePacket: true, + }) + h.pathProbePackets = append(h.pathProbePackets, p) +} + +func (h *sentPacketHistory) Packets() iter.Seq[*packet] { + return func(yield func(*packet) bool) { + for _, p := range h.packets { + if p == nil { + continue + } + if !yield(p) { + return + } + } + } +} + +func (h *sentPacketHistory) PathProbes() iter.Seq[*packet] { + return func(yield func(*packet) bool) { + for _, p := range h.pathProbePackets { + if !yield(p) { + return + } } } - return nil } // FirstOutstanding returns the first outstanding packet. @@ -85,6 +105,14 @@ func (h *sentPacketHistory) FirstOutstanding() *packet { return nil } +// FirstOutstandingPathProbe returns the first outstanding path probe packet +func (h *sentPacketHistory) FirstOutstandingPathProbe() *packet { + if len(h.pathProbePackets) == 0 { + return nil + } + return h.pathProbePackets[0] +} + func (h *sentPacketHistory) Len() int { return len(h.packets) } @@ -120,6 +148,27 @@ func (h *sentPacketHistory) Remove(pn protocol.PacketNumber) error { return nil } +// RemovePathProbe removes a path probe packet. +// It scales O(N), but that's ok, since we don't expect to send many path probe packets. +// It is not valid to call this function in IteratePathProbes. +func (h *sentPacketHistory) RemovePathProbe(pn protocol.PacketNumber) *packet { + var packetToDelete *packet + idx := -1 + for i, p := range h.pathProbePackets { + if p.PacketNumber == pn { + packetToDelete = p + idx = i + break + } + } + if idx != -1 { + // don't use slices.Delete, because it zeros the deleted element + copy(h.pathProbePackets[idx:], h.pathProbePackets[idx+1:]) + h.pathProbePackets = h.pathProbePackets[:len(h.pathProbePackets)-1] + } + return packetToDelete +} + // getIndex gets the index of packet p in the packets slice. 
func (h *sentPacketHistory) getIndex(p protocol.PacketNumber) (int, bool) { if len(h.packets) == 0 { @@ -140,6 +189,10 @@ func (h *sentPacketHistory) HasOutstandingPackets() bool { return h.numOutstanding > 0 } +func (h *sentPacketHistory) HasOutstandingPathProbes() bool { + return len(h.pathProbePackets) > 0 +} + // delete all nil entries at the beginning of the packets slice func (h *sentPacketHistory) cleanupStart() { for i, p := range h.packets { diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go index 3d88d577..950e5f72 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go @@ -36,7 +36,7 @@ type baseFlowController struct { // For every offset, it only returns true once. // If it is blocked, the offset is returned. func (c *baseFlowController) IsNewlyBlocked() (bool, protocol.ByteCount) { - if c.sendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt { + if c.SendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt { return false, 0 } c.lastBlockedAt = c.sendWindow @@ -56,7 +56,7 @@ func (c *baseFlowController) UpdateSendWindow(offset protocol.ByteCount) (update return false } -func (c *baseFlowController) sendWindowSize() protocol.ByteCount { +func (c *baseFlowController) SendWindowSize() protocol.ByteCount { // this only happens during connection establishment, when data is sent before we receive the peer's transport parameters if c.bytesSent > c.sendWindow { return 0 @@ -66,11 +66,6 @@ func (c *baseFlowController) sendWindowSize() protocol.ByteCount { // needs to be called with locked mutex func (c *baseFlowController) addBytesRead(n protocol.ByteCount) { - // pretend we sent a WindowUpdate when reading the first byte - // this way auto-tuning of the window size already works for the first WindowUpdate - if c.bytesRead == 0 { - c.startNewAutoTuningEpoch(time.Now()) - } c.bytesRead += n } @@ -82,19 +77,19 @@ func (c *baseFlowController) hasWindowUpdate() bool { // getWindowUpdate updates the receive window, if necessary // it returns the new offset -func (c *baseFlowController) getWindowUpdate() protocol.ByteCount { +func (c *baseFlowController) getWindowUpdate(now time.Time) protocol.ByteCount { if !c.hasWindowUpdate() { return 0 } - c.maybeAdjustWindowSize() + c.maybeAdjustWindowSize(now) c.receiveWindow = c.bytesRead + c.receiveWindowSize return c.receiveWindow } // maybeAdjustWindowSize increases the receiveWindowSize if we're sending updates too often. // For details about auto-tuning, see https://docs.google.com/document/d/1SExkMmGiz8VYzV3s9E35JQlJ73vhzCekKkDi85F1qCE/edit?usp=sharing. 
-func (c *baseFlowController) maybeAdjustWindowSize() { +func (c *baseFlowController) maybeAdjustWindowSize(now time.Time) { bytesReadInEpoch := c.bytesRead - c.epochStartOffset // don't do anything if less than half the window has been consumed if bytesReadInEpoch <= c.receiveWindowSize/2 { @@ -106,7 +101,6 @@ func (c *baseFlowController) maybeAdjustWindowSize() { } fraction := float64(bytesReadInEpoch) / float64(c.receiveWindowSize) - now := time.Now() if now.Sub(c.epochStartTime) < time.Duration(4*fraction*float64(rtt)) { // window is consumed too fast, try to increase the window size newSize := min(2*c.receiveWindowSize, c.maxReceiveWindowSize) diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go index 8504cdcf..c550e75a 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go @@ -12,8 +12,6 @@ import ( type connectionFlowController struct { baseFlowController - - queueWindowUpdate func() } var _ ConnectionFlowController = &connectionFlowController{} @@ -23,11 +21,10 @@ var _ ConnectionFlowController = &connectionFlowController{} func NewConnectionFlowController( receiveWindow protocol.ByteCount, maxReceiveWindow protocol.ByteCount, - queueWindowUpdate func(), allowWindowIncrease func(size protocol.ByteCount) bool, rttStats *utils.RTTStats, logger utils.Logger, -) ConnectionFlowController { +) *connectionFlowController { return &connectionFlowController{ baseFlowController: baseFlowController{ rttStats: rttStats, @@ -37,20 +34,20 @@ func NewConnectionFlowController( allowWindowIncrease: allowWindowIncrease, logger: logger, }, - queueWindowUpdate: queueWindowUpdate, } } -func (c *connectionFlowController) SendWindowSize() protocol.ByteCount { - return c.baseFlowController.sendWindowSize() -} - // IncrementHighestReceived adds an increment to the highestReceived value -func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount) error { +func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount, now time.Time) error { c.mutex.Lock() defer c.mutex.Unlock() + // If this is the first frame received on this connection, start flow-control auto-tuning. 
+ if c.highestReceived == 0 { + c.startNewAutoTuningEpoch(now) + } c.highestReceived += increment + if c.checkFlowControlViolation() { return &qerr.TransportError{ ErrorCode: qerr.FlowControlError, @@ -60,44 +57,47 @@ func (c *connectionFlowController) IncrementHighestReceived(increment protocol.B return nil } -func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) { +func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) (hasWindowUpdate bool) { c.mutex.Lock() - c.baseFlowController.addBytesRead(n) - shouldQueueWindowUpdate := c.hasWindowUpdate() - c.mutex.Unlock() - if shouldQueueWindowUpdate { - c.queueWindowUpdate() - } + defer c.mutex.Unlock() + + c.addBytesRead(n) + return c.hasWindowUpdate() } -func (c *connectionFlowController) GetWindowUpdate() protocol.ByteCount { +func (c *connectionFlowController) GetWindowUpdate(now time.Time) protocol.ByteCount { c.mutex.Lock() + defer c.mutex.Unlock() + oldWindowSize := c.receiveWindowSize - offset := c.baseFlowController.getWindowUpdate() - if oldWindowSize < c.receiveWindowSize { + offset := c.getWindowUpdate(now) + if c.logger.Debug() && oldWindowSize < c.receiveWindowSize { c.logger.Debugf("Increasing receive flow control window for the connection to %d kB", c.receiveWindowSize/(1<<10)) } - c.mutex.Unlock() return offset } // EnsureMinimumWindowSize sets a minimum window size // it should make sure that the connection-level window is increased when a stream-level window grows -func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount) { +func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount, now time.Time) { c.mutex.Lock() - if inc > c.receiveWindowSize { - c.logger.Debugf("Increasing receive flow control window for the connection to %d kB, in response to stream flow control window increase", c.receiveWindowSize/(1<<10)) - newSize := min(inc, c.maxReceiveWindowSize) - if delta := newSize - c.receiveWindowSize; delta > 0 && c.allowWindowIncrease(delta) { - c.receiveWindowSize = newSize - } - c.startNewAutoTuningEpoch(time.Now()) + defer c.mutex.Unlock() + + if inc <= c.receiveWindowSize { + return } - c.mutex.Unlock() + newSize := min(inc, c.maxReceiveWindowSize) + if delta := newSize - c.receiveWindowSize; delta > 0 && c.allowWindowIncrease(delta) { + c.receiveWindowSize = newSize + if c.logger.Debug() { + c.logger.Debugf("Increasing receive flow control window for the connection to %d, in response to stream flow control window increase", newSize) + } + } + c.startNewAutoTuningEpoch(now) } // Reset rests the flow controller. This happens when 0-RTT is rejected. -// All stream data is invalidated, it's if we had never opened a stream and never sent any data. +// All stream data is invalidated, it's as if we had never opened a stream and never sent any data. // At that point, we only have sent stream data, but we didn't have the keys to open 1-RTT keys yet. 
func (c *connectionFlowController) Reset() error { c.mutex.Lock() @@ -108,5 +108,6 @@ func (c *connectionFlowController) Reset() error { } c.bytesSent = 0 c.lastBlockedAt = 0 + c.sendWindow = 0 return nil } diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go index fc5f9de0..23cf30c5 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go @@ -1,6 +1,10 @@ package flowcontrol -import "github.com/quic-go/quic-go/internal/protocol" +import ( + "time" + + "github.com/quic-go/quic-go/internal/protocol" +) type flowController interface { // for sending @@ -8,34 +12,36 @@ type flowController interface { UpdateSendWindow(protocol.ByteCount) (updated bool) AddBytesSent(protocol.ByteCount) // for receiving - AddBytesRead(protocol.ByteCount) - GetWindowUpdate() protocol.ByteCount // returns 0 if no update is necessary - IsNewlyBlocked() (bool, protocol.ByteCount) + GetWindowUpdate(time.Time) protocol.ByteCount // returns 0 if no update is necessary } // A StreamFlowController is a flow controller for a QUIC stream. type StreamFlowController interface { flowController + AddBytesRead(protocol.ByteCount) (hasStreamWindowUpdate, hasConnWindowUpdate bool) // UpdateHighestReceived is called when a new highest offset is received // final has to be to true if this is the final offset of the stream, // as contained in a STREAM frame with FIN bit, and the RESET_STREAM frame - UpdateHighestReceived(offset protocol.ByteCount, final bool) error + UpdateHighestReceived(offset protocol.ByteCount, final bool, now time.Time) error // Abandon is called when reading from the stream is aborted early, // and there won't be any further calls to AddBytesRead. Abandon() + IsNewlyBlocked() bool } // The ConnectionFlowController is the flow controller for the connection. 
type ConnectionFlowController interface { flowController + AddBytesRead(protocol.ByteCount) (hasWindowUpdate bool) Reset() error + IsNewlyBlocked() (bool, protocol.ByteCount) } type connectionFlowControllerI interface { ConnectionFlowController // The following two methods are not supposed to be called from outside this packet, but are needed internally // for sending - EnsureMinimumWindowSize(protocol.ByteCount) + EnsureMinimumWindowSize(protocol.ByteCount, time.Time) // for receiving - IncrementHighestReceived(protocol.ByteCount) error + IncrementHighestReceived(protocol.ByteCount, time.Time) error } diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go index ccee287d..968e9a5b 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go @@ -2,6 +2,7 @@ package flowcontrol import ( "fmt" + "time" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" @@ -13,8 +14,6 @@ type streamFlowController struct { streamID protocol.StreamID - queueWindowUpdate func() - connection connectionFlowControllerI receivedFinalOffset bool @@ -29,14 +28,12 @@ func NewStreamFlowController( receiveWindow protocol.ByteCount, maxReceiveWindow protocol.ByteCount, initialSendWindow protocol.ByteCount, - queueWindowUpdate func(protocol.StreamID), rttStats *utils.RTTStats, logger utils.Logger, ) StreamFlowController { return &streamFlowController{ - streamID: streamID, - connection: cfc.(connectionFlowControllerI), - queueWindowUpdate: func() { queueWindowUpdate(streamID) }, + streamID: streamID, + connection: cfc.(connectionFlowControllerI), baseFlowController: baseFlowController{ rttStats: rttStats, receiveWindow: receiveWindow, @@ -49,7 +46,7 @@ func NewStreamFlowController( } // UpdateHighestReceived updates the highestReceived value, if the offset is higher. -func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, final bool) error { +func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, final bool, now time.Time) error { // If the final offset for this stream is already known, check for consistency. if c.receivedFinalOffset { // If we receive another final offset, check that it's the same. @@ -74,9 +71,8 @@ func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, if offset == c.highestReceived { return nil } - // A higher offset was received before. - // This can happen due to reordering. - if offset <= c.highestReceived { + // A higher offset was received before. This can happen due to reordering. + if offset < c.highestReceived { if final { return &qerr.TransportError{ ErrorCode: qerr.FinalSizeError, @@ -86,26 +82,29 @@ func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, return nil } + // If this is the first frame received for this stream, start flow-control auto-tuning. 
+ if c.highestReceived == 0 { + c.startNewAutoTuningEpoch(now) + } increment := offset - c.highestReceived c.highestReceived = offset + if c.checkFlowControlViolation() { return &qerr.TransportError{ ErrorCode: qerr.FlowControlError, ErrorMessage: fmt.Sprintf("received %d bytes on stream %d, allowed %d bytes", offset, c.streamID, c.receiveWindow), } } - return c.connection.IncrementHighestReceived(increment) + return c.connection.IncrementHighestReceived(increment, now) } -func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) { +func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) (hasStreamWindowUpdate, hasConnWindowUpdate bool) { c.mutex.Lock() - c.baseFlowController.addBytesRead(n) - shouldQueueWindowUpdate := c.shouldQueueWindowUpdate() + c.addBytesRead(n) + hasStreamWindowUpdate = c.shouldQueueWindowUpdate() c.mutex.Unlock() - if shouldQueueWindowUpdate { - c.queueWindowUpdate() - } - c.connection.AddBytesRead(n) + hasConnWindowUpdate = c.connection.AddBytesRead(n) + return } func (c *streamFlowController) Abandon() { @@ -124,27 +123,32 @@ func (c *streamFlowController) AddBytesSent(n protocol.ByteCount) { } func (c *streamFlowController) SendWindowSize() protocol.ByteCount { - return min(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize()) + return min(c.baseFlowController.SendWindowSize(), c.connection.SendWindowSize()) +} + +func (c *streamFlowController) IsNewlyBlocked() bool { + blocked, _ := c.baseFlowController.IsNewlyBlocked() + return blocked } func (c *streamFlowController) shouldQueueWindowUpdate() bool { return !c.receivedFinalOffset && c.hasWindowUpdate() } -func (c *streamFlowController) GetWindowUpdate() protocol.ByteCount { +func (c *streamFlowController) GetWindowUpdate(now time.Time) protocol.ByteCount { // If we already received the final offset for this stream, the peer won't need any additional flow control credit. if c.receivedFinalOffset { return 0 } - // Don't use defer for unlocking the mutex here, GetWindowUpdate() is called frequently and defer shows up in the profiler c.mutex.Lock() + defer c.mutex.Unlock() + oldWindowSize := c.receiveWindowSize - offset := c.baseFlowController.getWindowUpdate() + offset := c.getWindowUpdate(now) if c.receiveWindowSize > oldWindowSize { // auto-tuning enlarged the window size - c.logger.Debugf("Increasing receive flow control window for stream %d to %d kB", c.streamID, c.receiveWindowSize/(1<<10)) - c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize) * protocol.ConnectionFlowControlMultiplier)) + c.logger.Debugf("Increasing receive flow control window for stream %d to %d", c.streamID, c.receiveWindowSize) + c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize)*protocol.ConnectionFlowControlMultiplier), now) } - c.mutex.Unlock() return offset } diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go b/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go index fd3da738..e7fa8e3d 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go @@ -12,9 +12,7 @@ import ( // These cipher suite implementations are copied from the standard library crypto/tls package. 
-const ( - aeadNonceLength = 12 -) +const aeadNonceLength = 12 type cipherSuite struct { ID uint16 diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go index 20bcc474..1a86c675 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go @@ -12,7 +12,6 @@ import ( "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" - "github.com/quic-go/quic-go/internal/qtls" "github.com/quic-go/quic-go/internal/utils" "github.com/quic-go/quic-go/internal/wire" "github.com/quic-go/quic-go/logging" @@ -89,12 +88,13 @@ func NewCryptoSetupClient( tlsConf = tlsConf.Clone() tlsConf.MinVersion = tls.VersionTLS13 - quicConf := &tls.QUICConfig{TLSConfig: tlsConf} - qtls.SetupConfigForClient(quicConf, cs.marshalDataForSessionState, cs.handleDataFromSessionState) cs.tlsConf = tlsConf cs.allow0RTT = enable0RTT - cs.conn = tls.QUICClient(quicConf) + cs.conn = tls.QUICClient(&tls.QUICConfig{ + TLSConfig: tlsConf, + EnableSessionEvents: true, + }) cs.conn.SetTransportParameters(cs.ourParams.Marshal(protocol.PerspectiveClient)) return cs @@ -123,9 +123,13 @@ func NewCryptoSetupServer( ) cs.allow0RTT = allow0RTT - tlsConf = qtls.SetupConfigForServer(tlsConf, localAddr, remoteAddr, cs.getDataForSessionTicket, cs.handleSessionTicket) + tlsConf = setupConfigForServer(tlsConf, localAddr, remoteAddr) + cs.tlsConf = tlsConf - cs.conn = tls.QUICServer(&tls.QUICConfig{TLSConfig: tlsConf}) + cs.conn = tls.QUICServer(&tls.QUICConfig{ + TLSConfig: tlsConf, + EnableSessionEvents: true, + }) return cs } @@ -178,11 +182,10 @@ func (h *cryptoSetup) StartHandshake(ctx context.Context) error { } for { ev := h.conn.NextEvent() - done, err := h.handleEvent(ev) - if err != nil { + if err := h.handleEvent(ev); err != nil { return wrapError(err) } - if done { + if ev.Kind == tls.QUICNoEvent { break } } @@ -213,47 +216,78 @@ func (h *cryptoSetup) HandleMessage(data []byte, encLevel protocol.EncryptionLev } func (h *cryptoSetup) handleMessage(data []byte, encLevel protocol.EncryptionLevel) error { - if err := h.conn.HandleData(qtls.ToTLSEncryptionLevel(encLevel), data); err != nil { + if err := h.conn.HandleData(encLevel.ToTLSEncryptionLevel(), data); err != nil { return err } for { ev := h.conn.NextEvent() - done, err := h.handleEvent(ev) - if err != nil { + if err := h.handleEvent(ev); err != nil { return err } - if done { + if ev.Kind == tls.QUICNoEvent { return nil } } } -func (h *cryptoSetup) handleEvent(ev tls.QUICEvent) (done bool, err error) { +func (h *cryptoSetup) handleEvent(ev tls.QUICEvent) (err error) { switch ev.Kind { case tls.QUICNoEvent: - return true, nil + return nil case tls.QUICSetReadSecret: h.setReadKey(ev.Level, ev.Suite, ev.Data) - return false, nil + return nil case tls.QUICSetWriteSecret: h.setWriteKey(ev.Level, ev.Suite, ev.Data) - return false, nil + return nil case tls.QUICTransportParameters: - return false, h.handleTransportParameters(ev.Data) + return h.handleTransportParameters(ev.Data) case tls.QUICTransportParametersRequired: h.conn.SetTransportParameters(h.ourParams.Marshal(h.perspective)) - return false, nil + return nil case tls.QUICRejectedEarlyData: h.rejected0RTT() - return false, nil + return nil case tls.QUICWriteData: h.writeRecord(ev.Level, ev.Data) - return false, nil + return nil case tls.QUICHandshakeDone: h.handshakeComplete() - return false, nil + return nil + case 
tls.QUICStoreSession: + if h.perspective == protocol.PerspectiveServer { + panic("cryptoSetup BUG: unexpected QUICStoreSession event for the server") + } + ev.SessionState.Extra = append( + ev.SessionState.Extra, + addSessionStateExtraPrefix(h.marshalDataForSessionState(ev.SessionState.EarlyData)), + ) + return h.conn.StoreSession(ev.SessionState) + case tls.QUICResumeSession: + var allowEarlyData bool + switch h.perspective { + case protocol.PerspectiveClient: + // for clients, this event occurs when a session ticket is selected + allowEarlyData = h.handleDataFromSessionState( + findSessionStateExtraData(ev.SessionState.Extra), + ev.SessionState.EarlyData, + ) + case protocol.PerspectiveServer: + // for servers, this event occurs when receiving the client's session ticket + allowEarlyData = h.handleSessionTicket( + findSessionStateExtraData(ev.SessionState.Extra), + ev.SessionState.EarlyData, + ) + } + if ev.SessionState.EarlyData { + ev.SessionState.EarlyData = allowEarlyData + } + return nil default: - return false, fmt.Errorf("unexpected event: %d", ev.Kind) + // Unknown events should be ignored. + // crypto/tls will ensure that this is safe to do. + // See the discussion following https://github.com/golang/go/issues/68124#issuecomment-2187042510 for details. + return nil } } @@ -344,7 +378,10 @@ func (h *cryptoSetup) getDataForSessionTicket() []byte { // Due to limitations in crypto/tls, it's only possible to generate a single session ticket per connection. // It is only valid for the server. func (h *cryptoSetup) GetSessionTicket() ([]byte, error) { - if err := h.conn.SendSessionTicket(tls.QUICSessionTicketOptions{EarlyData: h.allow0RTT}); err != nil { + if err := h.conn.SendSessionTicket(tls.QUICSessionTicketOptions{ + EarlyData: h.allow0RTT, + Extra: [][]byte{addSessionStateExtraPrefix(h.getDataForSessionTicket())}, + }); err != nil { // Session tickets might be disabled by tls.Config.SessionTicketsDisabled. // We can't check h.tlsConfig here, since the actual config might have been obtained from // the GetConfigForClient callback. @@ -370,9 +407,9 @@ func (h *cryptoSetup) GetSessionTicket() ([]byte, error) { // It reads parameters from the session ticket and checks whether to accept 0-RTT if the session ticket enabled 0-RTT. // Note that the fact that the session ticket allows 0-RTT doesn't mean that the actual TLS handshake enables 0-RTT: // A client may use a 0-RTT enabled session to resume a TLS session without using 0-RTT. 
-func (h *cryptoSetup) handleSessionTicket(sessionTicketData []byte, using0RTT bool) bool { +func (h *cryptoSetup) handleSessionTicket(data []byte, using0RTT bool) (allowEarlyData bool) { var t sessionTicket - if err := t.Unmarshal(sessionTicketData, using0RTT); err != nil { + if err := t.Unmarshal(data, using0RTT); err != nil { h.logger.Debugf("Unmarshalling session ticket failed: %s", err.Error()) return false } @@ -440,7 +477,7 @@ func (h *cryptoSetup) setReadKey(el tls.QUICEncryptionLevel, suiteID uint16, tra } h.events = append(h.events, Event{Kind: EventReceivedReadKeys}) if h.tracer != nil && h.tracer.UpdatedKeyFromTLS != nil { - h.tracer.UpdatedKeyFromTLS(qtls.FromTLSEncryptionLevel(el), h.perspective.Opposite()) + h.tracer.UpdatedKeyFromTLS(protocol.FromTLSEncryptionLevel(el), h.perspective.Opposite()) } } @@ -491,7 +528,7 @@ func (h *cryptoSetup) setWriteKey(el tls.QUICEncryptionLevel, suiteID uint16, tr panic("unexpected write encryption level") } if h.tracer != nil && h.tracer.UpdatedKeyFromTLS != nil { - h.tracer.UpdatedKeyFromTLS(qtls.FromTLSEncryptionLevel(el), h.perspective) + h.tracer.UpdatedKeyFromTLS(protocol.FromTLSEncryptionLevel(el), h.perspective) } } @@ -621,8 +658,7 @@ func (h *cryptoSetup) ConnectionState() ConnectionState { } func wrapError(err error) error { - // alert 80 is an internal error - if alertErr := tls.AlertError(0); errors.As(err, &alertErr) && alertErr != 80 { + if alertErr := tls.AlertError(0); errors.As(err, &alertErr) { return qerr.NewLocalCryptoError(uint8(alertErr), err) } return &qerr.TransportError{ErrorCode: qerr.InternalError, ErrorMessage: err.Error()} diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go b/vendor/github.com/quic-go/quic-go/internal/handshake/fake_conn.go similarity index 97% rename from vendor/github.com/quic-go/quic-go/internal/qtls/conn.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/fake_conn.go index 6660ac66..54af823b 100644 --- a/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/fake_conn.go @@ -1,4 +1,4 @@ -package qtls +package handshake import ( "net" diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go b/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go index 0caf1c8e..3da97cd8 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go @@ -8,8 +8,6 @@ import ( ) // hkdfExpandLabel HKDF expands a label as defined in RFC 8446, section 7.1. -// Since this implementation avoids using a cryptobyte.Builder, it is about 15% faster than the -// hkdfExpandLabel in the standard library. 
func hkdfExpandLabel(hash crypto.Hash, secret, context []byte, label string, length int) []byte { b := make([]byte, 3, 3+6+len(label)+1+len(context)) binary.BigEndian.PutUint16(b, uint16(length)) diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go index 6e0eec77..c3a59fcd 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go @@ -83,6 +83,29 @@ const ( EventHandshakeComplete ) +func (k EventKind) String() string { + switch k { + case EventNoEvent: + return "EventNoEvent" + case EventWriteInitialData: + return "EventWriteInitialData" + case EventWriteHandshakeData: + return "EventWriteHandshakeData" + case EventReceivedReadKeys: + return "EventReceivedReadKeys" + case EventDiscard0RTTKeys: + return "EventDiscard0RTTKeys" + case EventReceivedTransportParameters: + return "EventReceivedTransportParameters" + case EventRestoredTransportParameters: + return "EventRestoredTransportParameters" + case EventHandshakeComplete: + return "EventHandshakeComplete" + default: + return "Unknown EventKind" + } +} + // Event is a handshake event. type Event struct { Kind EventKind diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go index 30643cdf..27a09e22 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go @@ -10,16 +10,13 @@ import ( "github.com/quic-go/quic-go/internal/protocol" ) +// Instead of using an init function, the AEADs are created lazily. +// For more details see https://github.com/quic-go/quic-go/issues/4894. 
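[Editor's note] The comment above refers to dropping the package-level init in favour of constructing the Retry AEADs on first use (the nil checks added to GetRetryIntegrityTag below). As an illustration of the general pattern only, here is a sketch that builds an AES-GCM AEAD lazily with sync.OnceValue; the zero key is a placeholder, not one of the Retry keys, and the diff itself uses a plain nil check rather than OnceValue:

    package main

    import (
    	"crypto/aes"
    	"crypto/cipher"
    	"fmt"
    	"sync"
    )

    // retryAEAD builds the AEAD the first time it is called and caches the result,
    // so merely importing the package no longer pays the construction cost up front.
    var retryAEAD = sync.OnceValue(func() cipher.AEAD {
    	key := [16]byte{} // placeholder key, for illustration only
    	block, err := aes.NewCipher(key[:])
    	if err != nil {
    		panic(err)
    	}
    	aead, err := cipher.NewGCM(block)
    	if err != nil {
    		panic(err)
    	}
    	return aead
    })

    func main() {
    	// Nothing is constructed until this first call.
    	sealed := retryAEAD().Seal(nil, make([]byte, 12), nil, []byte("pseudo packet"))
    	fmt.Println(len(sealed)) // 16 (the GCM tag over the additional data)
    }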
var ( retryAEADv1 cipher.AEAD // used for QUIC v1 (RFC 9000) retryAEADv2 cipher.AEAD // used for QUIC v2 (RFC 9369) ) -func init() { - retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) - retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92}) -} - func initAEAD(key [16]byte) cipher.AEAD { aes, err := aes.NewCipher(key[:]) if err != nil { @@ -52,8 +49,14 @@ func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, ve var tag [16]byte var sealed []byte if version == protocol.Version2 { + if retryAEADv2 == nil { + retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92}) + } sealed = retryAEADv2.Seal(tag[:0], retryNonceV2[:], nil, retryBuf.Bytes()) } else { + if retryAEADv1 == nil { + retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) + } sealed = retryAEADv1.Seal(tag[:0], retryNonceV1[:], nil, retryBuf.Bytes()) } if len(sealed) != 16 { diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go index b67f0101..4da517fc 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go @@ -1,6 +1,7 @@ package handshake import ( + "bytes" "errors" "fmt" "time" @@ -52,3 +53,20 @@ func (t *sessionTicket) Unmarshal(b []byte, using0RTT bool) error { t.RTT = time.Duration(rtt) * time.Microsecond return nil } + +const extraPrefix = "quic-go1" + +func addSessionStateExtraPrefix(b []byte) []byte { + return append([]byte(extraPrefix), b...) +} + +func findSessionStateExtraData(extras [][]byte) []byte { + prefix := []byte(extraPrefix) + for _, extra := range extras { + if len(extra) < len(prefix) || !bytes.Equal(prefix, extra[:len(prefix)]) { + continue + } + return extra[len(prefix):] + } + return nil +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/tls_config.go b/vendor/github.com/quic-go/quic-go/internal/handshake/tls_config.go new file mode 100644 index 00000000..c4c0d22d --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/tls_config.go @@ -0,0 +1,39 @@ +package handshake + +import ( + "crypto/tls" + "net" +) + +func setupConfigForServer(conf *tls.Config, localAddr, remoteAddr net.Addr) *tls.Config { + // Workaround for https://github.com/golang/go/issues/60506. + // This initializes the session tickets _before_ cloning the config. + _, _ = conf.DecryptTicket(nil, tls.ConnectionState{}) + + conf = conf.Clone() + conf.MinVersion = tls.VersionTLS13 + + // The tls.Config contains two callbacks that pass in a tls.ClientHelloInfo. + // Since crypto/tls doesn't do it, we need to make sure to set the Conn field with a fake net.Conn + // that allows the caller to get the local and the remote address. 
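[Editor's note] To illustrate why that stand-in net.Conn matters: callbacks such as GetCertificate and GetConfigForClient may inspect ClientHelloInfo.Conn, which crypto/tls leaves nil for QUIC handshakes. A hedged sketch of a callback that only gets useful address information once a wrapper like the one below populates the Conn field; getCertificate and certFor are invented for this example:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net"
    )

    // certFor stands in for whatever per-host certificate lookup an application might do.
    func certFor(host string) (*tls.Certificate, error) {
    	return nil, fmt.Errorf("no certificate for %q", host)
    }

    func getCertificate(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
    	// Without a wrapper installing a stand-in net.Conn, info.Conn would be nil here,
    	// and the callback could not see the client's address at all.
    	if info.Conn != nil {
    		fmt.Println("ClientHello from", info.Conn.RemoteAddr(), "for", info.ServerName)
    	}
    	return certFor(info.ServerName)
    }

    func main() {
    	// Simulate what the wrapper does: hand the callback a ClientHelloInfo with Conn populated.
    	c1, c2 := net.Pipe()
    	defer c1.Close()
    	defer c2.Close()

    	_, err := getCertificate(&tls.ClientHelloInfo{ServerName: "example.com", Conn: c1})
    	fmt.Println(err)
    }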
+ if conf.GetConfigForClient != nil { + gcfc := conf.GetConfigForClient + conf.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) { + info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr} + c, err := gcfc(info) + if c != nil { + // we're returning a tls.Config here, so we need to apply this recursively + c = setupConfigForServer(c, localAddr, remoteAddr) + } + return c, err + } + } + if conf.GetCertificate != nil { + gc := conf.GetCertificate + conf.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { + info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr} + return gc(info) + } + } + return conf +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go index 2d91e6b2..84e58cfc 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go @@ -46,7 +46,7 @@ type TokenGenerator struct { // NewTokenGenerator initializes a new TokenGenerator func NewTokenGenerator(key TokenProtectorKey) *TokenGenerator { - return &TokenGenerator{tokenProtector: newTokenProtector(key)} + return &TokenGenerator{tokenProtector: *newTokenProtector(key)} } // NewRetryToken generates a new token for a Retry for a given source address diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go index f3a99e41..15779189 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go @@ -14,28 +14,20 @@ import ( // TokenProtectorKey is the key used to encrypt both Retry and session resumption tokens. type TokenProtectorKey [32]byte -// TokenProtector is used to create and verify a token -type tokenProtector interface { - // NewToken creates a new token - NewToken([]byte) ([]byte, error) - // DecodeToken decodes a token - DecodeToken([]byte) ([]byte, error) -} - const tokenNonceSize = 32 // tokenProtector is used to create and verify a token -type tokenProtectorImpl struct { +type tokenProtector struct { key TokenProtectorKey } // newTokenProtector creates a source for source address tokens -func newTokenProtector(key TokenProtectorKey) tokenProtector { - return &tokenProtectorImpl{key: key} +func newTokenProtector(key TokenProtectorKey) *tokenProtector { + return &tokenProtector{key: key} } // NewToken encodes data into a new token. -func (s *tokenProtectorImpl) NewToken(data []byte) ([]byte, error) { +func (s *tokenProtector) NewToken(data []byte) ([]byte, error) { var nonce [tokenNonceSize]byte if _, err := rand.Read(nonce[:]); err != nil { return nil, err @@ -48,7 +40,7 @@ func (s *tokenProtectorImpl) NewToken(data []byte) ([]byte, error) { } // DecodeToken decodes a token. 
-func (s *tokenProtectorImpl) DecodeToken(p []byte) ([]byte, error) { +func (s *tokenProtector) DecodeToken(p []byte) ([]byte, error) { if len(p) < tokenNonceSize { return nil, fmt.Errorf("token too short: %d", len(p)) } @@ -60,7 +52,7 @@ func (s *tokenProtectorImpl) DecodeToken(p []byte) ([]byte, error) { return aead.Open(nil, aeadNonce, p[tokenNonceSize:], nil) } -func (s *tokenProtectorImpl) createAEAD(nonce []byte) (cipher.AEAD, []byte, error) { +func (s *tokenProtector) createAEAD(nonce []byte) (cipher.AEAD, []byte, error) { h := hkdf.New(sha256.New, s.key[:], nonce, []byte("quic-go token source")) key := make([]byte, 32) // use a 32 byte key, in order to select AES-256 if _, err := io.ReadFull(h, key); err != nil { diff --git a/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go b/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go deleted file mode 100644 index a6032fc2..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go +++ /dev/null @@ -1,50 +0,0 @@ -package logutils - -import ( - "github.com/quic-go/quic-go/internal/protocol" - "github.com/quic-go/quic-go/internal/wire" - "github.com/quic-go/quic-go/logging" -) - -// ConvertFrame converts a wire.Frame into a logging.Frame. -// This makes it possible for external packages to access the frames. -// Furthermore, it removes the data slices from CRYPTO and STREAM frames. -func ConvertFrame(frame wire.Frame) logging.Frame { - switch f := frame.(type) { - case *wire.AckFrame: - // We use a pool for ACK frames. - // Implementations of the tracer interface may hold on to frames, so we need to make a copy here. - return ConvertAckFrame(f) - case *wire.CryptoFrame: - return &logging.CryptoFrame{ - Offset: f.Offset, - Length: protocol.ByteCount(len(f.Data)), - } - case *wire.StreamFrame: - return &logging.StreamFrame{ - StreamID: f.StreamID, - Offset: f.Offset, - Length: f.DataLen(), - Fin: f.Fin, - } - case *wire.DatagramFrame: - return &logging.DatagramFrame{ - Length: logging.ByteCount(len(f.Data)), - } - default: - return logging.Frame(frame) - } -} - -func ConvertAckFrame(f *wire.AckFrame) *logging.AckFrame { - ranges := make([]wire.AckRange, 0, len(f.AckRanges)) - ranges = append(ranges, f.AckRanges...) 
- ack := &logging.AckFrame{ - AckRanges: ranges, - DelayTime: f.DelayTime, - ECNCE: f.ECNCE, - ECT0: f.ECT0, - ECT1: f.ECT1, - } - return ack -} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go b/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go index 32d38ab1..40aa331a 100644 --- a/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go @@ -1,5 +1,10 @@ package protocol +import ( + "crypto/tls" + "fmt" +) + // EncryptionLevel is the encryption level // Default value is Unencrypted type EncryptionLevel uint8 @@ -28,3 +33,33 @@ func (e EncryptionLevel) String() string { } return "unknown" } + +func (e EncryptionLevel) ToTLSEncryptionLevel() tls.QUICEncryptionLevel { + switch e { + case EncryptionInitial: + return tls.QUICEncryptionLevelInitial + case EncryptionHandshake: + return tls.QUICEncryptionLevelHandshake + case Encryption1RTT: + return tls.QUICEncryptionLevelApplication + case Encryption0RTT: + return tls.QUICEncryptionLevelEarly + default: + panic(fmt.Sprintf("unexpected encryption level: %s", e)) + } +} + +func FromTLSEncryptionLevel(e tls.QUICEncryptionLevel) EncryptionLevel { + switch e { + case tls.QUICEncryptionLevelInitial: + return EncryptionInitial + case tls.QUICEncryptionLevelHandshake: + return EncryptionHandshake + case tls.QUICEncryptionLevelApplication: + return Encryption1RTT + case tls.QUICEncryptionLevelEarly: + return Encryption0RTT + default: + panic(fmt.Sprintf("unexpect encryption level: %s", e)) + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go b/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go index bd340161..9422db92 100644 --- a/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go @@ -21,58 +21,36 @@ const ( PacketNumberLen4 PacketNumberLen = 4 ) -// DecodePacketNumber calculates the packet number based on the received packet number, its length and the last seen packet number -func DecodePacketNumber( - packetNumberLength PacketNumberLen, - lastPacketNumber PacketNumber, - wirePacketNumber PacketNumber, -) PacketNumber { - var epochDelta PacketNumber - switch packetNumberLength { - case PacketNumberLen1: - epochDelta = PacketNumber(1) << 8 - case PacketNumberLen2: - epochDelta = PacketNumber(1) << 16 - case PacketNumberLen3: - epochDelta = PacketNumber(1) << 24 - case PacketNumberLen4: - epochDelta = PacketNumber(1) << 32 +// DecodePacketNumber calculates the packet number based its length and the last seen packet number +// This function is taken from https://www.rfc-editor.org/rfc/rfc9000.html#section-a.3. 
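[Editor's note] The rewritten DecodePacketNumber below follows the pseudocode from that appendix. As a worked example with the numbers RFC 9000 itself uses: given a largest acknowledged packet number of 0xa82f30ea and a 2-byte truncated value of 0x9b32, the decoder reconstructs 0xa82f9b32. A self-contained sketch, restated here with plain uint64 instead of the package's PacketNumber type:

    package main

    import "fmt"

    // decodePacketNumber mirrors RFC 9000, Appendix A.3: pick the candidate inside the
    // window of size 2^(8*length) that is closest to largest+1.
    func decodePacketNumber(length uint8, largest, truncated uint64) uint64 {
    	expected := largest + 1
    	win := uint64(1) << (length * 8)
    	hwin := win / 2
    	mask := win - 1
    	candidate := (expected &^ mask) | truncated
    	if candidate <= expected-hwin && candidate < 1<<62-win {
    		return candidate + win
    	}
    	if candidate > expected+hwin && candidate >= win {
    		return candidate - win
    	}
    	return candidate
    }

    func main() {
    	// Example from RFC 9000, Appendix A.3.
    	pn := decodePacketNumber(2, 0xa82f30ea, 0x9b32)
    	fmt.Printf("%#x\n", pn) // 0xa82f9b32
    }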
+func DecodePacketNumber(length PacketNumberLen, largest PacketNumber, truncated PacketNumber) PacketNumber { + expected := largest + 1 + win := PacketNumber(1 << (length * 8)) + hwin := win / 2 + mask := win - 1 + candidate := (expected & ^mask) | truncated + if candidate <= expected-hwin && candidate < 1<<62-win { + return candidate + win } - epoch := lastPacketNumber & ^(epochDelta - 1) - var prevEpochBegin PacketNumber - if epoch > epochDelta { - prevEpochBegin = epoch - epochDelta + if candidate > expected+hwin && candidate >= win { + return candidate - win } - nextEpochBegin := epoch + epochDelta - return closestTo( - lastPacketNumber+1, - epoch+wirePacketNumber, - closestTo(lastPacketNumber+1, prevEpochBegin+wirePacketNumber, nextEpochBegin+wirePacketNumber), - ) + return candidate } -func closestTo(target, a, b PacketNumber) PacketNumber { - if delta(target, a) < delta(target, b) { - return a - } - return b -} - -func delta(a, b PacketNumber) PacketNumber { - if a < b { - return b - a - } - return a - b -} - -// GetPacketNumberLengthForHeader gets the length of the packet number for the public header +// PacketNumberLengthForHeader gets the length of the packet number for the public header // it never chooses a PacketNumberLen of 1 byte, since this is too short under certain circumstances -func GetPacketNumberLengthForHeader(packetNumber, leastUnacked PacketNumber) PacketNumberLen { - diff := uint64(packetNumber - leastUnacked) - if diff < (1 << (16 - 1)) { +func PacketNumberLengthForHeader(pn, largestAcked PacketNumber) PacketNumberLen { + var numUnacked PacketNumber + if largestAcked == InvalidPacketNumber { + numUnacked = pn + 1 + } else { + numUnacked = pn - largestAcked + } + if numUnacked < 1<<(16-1) { return PacketNumberLen2 } - if diff < (1 << (24 - 1)) { + if numUnacked < 1<<(24-1) { return PacketNumberLen3 } return PacketNumberLen4 diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/params.go b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go index 7c4d8d4d..f0aa3ad9 100644 --- a/vendor/github.com/quic-go/quic-go/internal/protocol/params.go +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go @@ -102,10 +102,6 @@ const DefaultIdleTimeout = 30 * time.Second // DefaultHandshakeIdleTimeout is the default idle timeout used before handshake completion. const DefaultHandshakeIdleTimeout = 5 * time.Second -// MaxKeepAliveInterval is the maximum time until we send a packet to keep a connection alive. -// It should be shorter than the time that NATs clear their mapping. -const MaxKeepAliveInterval = 20 * time.Second - // RetiredConnectionIDDeleteTimeout is the time we keep closed connections around in order to retransmit the CONNECTION_CLOSE. // after this time all information about the old connection will be deleted const RetiredConnectionIDDeleteTimeout = 5 * time.Second diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go b/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go index d056cb9d..b1109ff4 100644 --- a/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go @@ -123,6 +123,10 @@ const MinUnknownVersionPacketSize = MinInitialPacketSize // MinStatelessResetSize is the minimum size of a stateless reset packet that we send const MinStatelessResetSize = 1 /* first byte */ + 20 /* max. conn ID length */ + 4 /* max. packet number length */ + 1 /* min. 
payload length */ + 16 /* token */ +// MinReceivedStatelessResetSize is the minimum size of a received stateless reset, +// as specified in section 10.3 of RFC 9000. +const MinReceivedStatelessResetSize = 5 + 16 + // MinConnectionIDLenInitial is the minimum length of the destination connection ID on an Initial packet. const MinConnectionIDLenInitial = 8 diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/version.go b/vendor/github.com/quic-go/quic-go/internal/protocol/version.go index 025ade9b..646587f3 100644 --- a/vendor/github.com/quic-go/quic-go/internal/protocol/version.go +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/version.go @@ -1,13 +1,12 @@ package protocol import ( + "crypto/rand" "encoding/binary" "fmt" "math" + mrand "math/rand/v2" "sync" - "time" - - "golang.org/x/exp/rand" ) // Version is a version number as int @@ -90,13 +89,22 @@ func ChooseSupportedVersion(ours, theirs []Version) (Version, bool) { var ( versionNegotiationMx sync.Mutex - versionNegotiationRand = rand.New(rand.NewSource(uint64(time.Now().UnixNano()))) + versionNegotiationRand mrand.Rand ) +func init() { + var seed [16]byte + rand.Read(seed[:]) + versionNegotiationRand = *mrand.New(mrand.NewPCG( + binary.BigEndian.Uint64(seed[:8]), + binary.BigEndian.Uint64(seed[8:]), + )) +} + // generateReservedVersion generates a reserved version (v & 0x0f0f0f0f == 0x0a0a0a0a) func generateReservedVersion() Version { var b [4]byte - _, _ = versionNegotiationRand.Read(b[:]) // ignore the error here. Failure to read random data doesn't break anything + binary.BigEndian.PutUint32(b[:], versionNegotiationRand.Uint32()) return Version((binary.BigEndian.Uint32(b[:]) | 0x0a0a0a0a) & 0xfafafafa) } @@ -105,7 +113,7 @@ func generateReservedVersion() Version { func GetGreasedVersions(supported []Version) []Version { versionNegotiationMx.Lock() defer versionNegotiationMx.Unlock() - randPos := rand.Intn(len(supported) + 1) + randPos := versionNegotiationRand.IntN(len(supported) + 1) greased := make([]Version, len(supported)+1) copy(greased, supported[:randPos]) greased[randPos] = generateReservedVersion() diff --git a/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go index 8f5936df..7fe1c293 100644 --- a/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go +++ b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go @@ -48,21 +48,16 @@ func (e *TransportError) Error() string { return str + ": " + msg } -func (e *TransportError) Is(target error) bool { - return target == net.ErrClosed -} +func (e *TransportError) Unwrap() []error { return []error{net.ErrClosed, e.error} } -func (e *TransportError) Unwrap() error { - return e.error +func (e *TransportError) Is(target error) bool { + t, ok := target.(*TransportError) + return ok && e.ErrorCode == t.ErrorCode && e.FrameType == t.FrameType && e.Remote == t.Remote } // An ApplicationErrorCode is an application-defined error code. type ApplicationErrorCode uint64 -func (e *ApplicationError) Is(target error) bool { - return target == net.ErrClosed -} - // A StreamErrorCode is an error code used to cancel streams. 
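[Editor's note] The error types above drop their custom Is methods that matched net.ErrClosed and instead expose Unwrap (returning []error in the TransportError case), so errors.Is and errors.As keep working through the standard unwrap chain while Is can now compare error codes between two instances. A simplified sketch of that pattern with a stand-in type, not the actual quic-go struct:

    package main

    import (
    	"errors"
    	"fmt"
    	"net"
    )

    // transportError is a simplified stand-in for the pattern used above.
    type transportError struct {
    	ErrorCode uint64
    	wrapped   error
    }

    func (e *transportError) Error() string { return fmt.Sprintf("transport error %#x", e.ErrorCode) }

    // Unwrap returns multiple errors (supported by errors.Is/As since Go 1.20),
    // so the error matches net.ErrClosed as well as whatever caused it.
    func (e *transportError) Unwrap() []error { return []error{net.ErrClosed, e.wrapped} }

    // Is matches another *transportError with the same code.
    func (e *transportError) Is(target error) bool {
    	t, ok := target.(*transportError)
    	return ok && e.ErrorCode == t.ErrorCode
    }

    func main() {
    	err := fmt.Errorf("handshake: %w", &transportError{ErrorCode: 0x0a, wrapped: errors.New("bad certificate")})

    	fmt.Println(errors.Is(err, net.ErrClosed))                    // true, via Unwrap
    	fmt.Println(errors.Is(err, &transportError{ErrorCode: 0x0a})) // true, via Is
    	fmt.Println(errors.Is(err, &transportError{ErrorCode: 0x0b})) // false
    }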
type StreamErrorCode uint64 @@ -81,23 +76,30 @@ func (e *ApplicationError) Error() string { return fmt.Sprintf("Application error %#x (%s): %s", e.ErrorCode, getRole(e.Remote), e.ErrorMessage) } +func (e *ApplicationError) Unwrap() error { return net.ErrClosed } + +func (e *ApplicationError) Is(target error) bool { + t, ok := target.(*ApplicationError) + return ok && e.ErrorCode == t.ErrorCode && e.Remote == t.Remote +} + type IdleTimeoutError struct{} var _ error = &IdleTimeoutError{} -func (e *IdleTimeoutError) Timeout() bool { return true } -func (e *IdleTimeoutError) Temporary() bool { return false } -func (e *IdleTimeoutError) Error() string { return "timeout: no recent network activity" } -func (e *IdleTimeoutError) Is(target error) bool { return target == net.ErrClosed } +func (e *IdleTimeoutError) Timeout() bool { return true } +func (e *IdleTimeoutError) Temporary() bool { return false } +func (e *IdleTimeoutError) Error() string { return "timeout: no recent network activity" } +func (e *IdleTimeoutError) Unwrap() error { return net.ErrClosed } type HandshakeTimeoutError struct{} var _ error = &HandshakeTimeoutError{} -func (e *HandshakeTimeoutError) Timeout() bool { return true } -func (e *HandshakeTimeoutError) Temporary() bool { return false } -func (e *HandshakeTimeoutError) Error() string { return "timeout: handshake did not complete in time" } -func (e *HandshakeTimeoutError) Is(target error) bool { return target == net.ErrClosed } +func (e *HandshakeTimeoutError) Timeout() bool { return true } +func (e *HandshakeTimeoutError) Temporary() bool { return false } +func (e *HandshakeTimeoutError) Error() string { return "timeout: handshake did not complete in time" } +func (e *HandshakeTimeoutError) Unwrap() error { return net.ErrClosed } // A VersionNegotiationError occurs when the client and the server can't agree on a QUIC version. type VersionNegotiationError struct { @@ -109,25 +111,18 @@ func (e *VersionNegotiationError) Error() string { return fmt.Sprintf("no compatible QUIC version found (we support %s, server offered %s)", e.Ours, e.Theirs) } -func (e *VersionNegotiationError) Is(target error) bool { - return target == net.ErrClosed -} +func (e *VersionNegotiationError) Unwrap() error { return net.ErrClosed } // A StatelessResetError occurs when we receive a stateless reset. 
-type StatelessResetError struct { - Token protocol.StatelessResetToken -} +type StatelessResetError struct{} var _ net.Error = &StatelessResetError{} func (e *StatelessResetError) Error() string { - return fmt.Sprintf("received a stateless reset with token %x", e.Token) -} - -func (e *StatelessResetError) Is(target error) bool { - return target == net.ErrClosed + return "received a stateless reset" } +func (e *StatelessResetError) Unwrap() error { return net.ErrClosed } func (e *StatelessResetError) Timeout() bool { return false } func (e *StatelessResetError) Temporary() bool { return true } diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go b/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go deleted file mode 100644 index 32a921cd..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go +++ /dev/null @@ -1,52 +0,0 @@ -package qtls - -import ( - "crypto/tls" - "fmt" - "unsafe" -) - -//go:linkname cipherSuitesTLS13 crypto/tls.cipherSuitesTLS13 -var cipherSuitesTLS13 []unsafe.Pointer - -//go:linkname defaultCipherSuitesTLS13 crypto/tls.defaultCipherSuitesTLS13 -var defaultCipherSuitesTLS13 []uint16 - -//go:linkname defaultCipherSuitesTLS13NoAES crypto/tls.defaultCipherSuitesTLS13NoAES -var defaultCipherSuitesTLS13NoAES []uint16 - -var cipherSuitesModified bool - -// SetCipherSuite modifies the cipherSuiteTLS13 slice of cipher suites inside qtls -// such that it only contains the cipher suite with the chosen id. -// The reset function returned resets them back to the original value. -func SetCipherSuite(id uint16) (reset func()) { - if cipherSuitesModified { - panic("cipher suites modified multiple times without resetting") - } - cipherSuitesModified = true - - origCipherSuitesTLS13 := append([]unsafe.Pointer{}, cipherSuitesTLS13...) - origDefaultCipherSuitesTLS13 := append([]uint16{}, defaultCipherSuitesTLS13...) - origDefaultCipherSuitesTLS13NoAES := append([]uint16{}, defaultCipherSuitesTLS13NoAES...) - // The order is given by the order of the slice elements in cipherSuitesTLS13 in qtls. 
- switch id { - case tls.TLS_AES_128_GCM_SHA256: - cipherSuitesTLS13 = cipherSuitesTLS13[:1] - case tls.TLS_CHACHA20_POLY1305_SHA256: - cipherSuitesTLS13 = cipherSuitesTLS13[1:2] - case tls.TLS_AES_256_GCM_SHA384: - cipherSuitesTLS13 = cipherSuitesTLS13[2:] - default: - panic(fmt.Sprintf("unexpected cipher suite: %d", id)) - } - defaultCipherSuitesTLS13 = []uint16{id} - defaultCipherSuitesTLS13NoAES = []uint16{id} - - return func() { - cipherSuitesTLS13 = origCipherSuitesTLS13 - defaultCipherSuitesTLS13 = origDefaultCipherSuitesTLS13 - defaultCipherSuitesTLS13NoAES = origDefaultCipherSuitesTLS13NoAES - cipherSuitesModified = false - } -} diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go b/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go deleted file mode 100644 index 4acac9e2..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go +++ /dev/null @@ -1,70 +0,0 @@ -package qtls - -import ( - "crypto/tls" - "sync" -) - -type clientSessionCache struct { - mx sync.Mutex - getData func(earlyData bool) []byte - setData func(data []byte, earlyData bool) (allowEarlyData bool) - wrapped tls.ClientSessionCache -} - -var _ tls.ClientSessionCache = &clientSessionCache{} - -func (c *clientSessionCache) Put(key string, cs *tls.ClientSessionState) { - c.mx.Lock() - defer c.mx.Unlock() - - if cs == nil { - c.wrapped.Put(key, nil) - return - } - ticket, state, err := cs.ResumptionState() - if err != nil || state == nil { - c.wrapped.Put(key, cs) - return - } - state.Extra = append(state.Extra, addExtraPrefix(c.getData(state.EarlyData))) - newCS, err := tls.NewResumptionState(ticket, state) - if err != nil { - // It's not clear why this would error. Just save the original state. - c.wrapped.Put(key, cs) - return - } - c.wrapped.Put(key, newCS) -} - -func (c *clientSessionCache) Get(key string) (*tls.ClientSessionState, bool) { - c.mx.Lock() - defer c.mx.Unlock() - - cs, ok := c.wrapped.Get(key) - if !ok || cs == nil { - return cs, ok - } - ticket, state, err := cs.ResumptionState() - if err != nil { - // It's not clear why this would error. - // Remove the ticket from the session cache, so we don't run into this error over and over again - c.wrapped.Put(key, nil) - return nil, false - } - // restore QUIC transport parameters and RTT stored in state.Extra - if extra := findExtraData(state.Extra); extra != nil { - earlyData := c.setData(extra, state.EarlyData) - if state.EarlyData { - state.EarlyData = earlyData - } - } - session, err := tls.NewResumptionState(ticket, state) - if err != nil { - // It's not clear why this would error. - // Remove the ticket from the session cache, so we don't run into this error over and over again - c.wrapped.Put(key, nil) - return nil, false - } - return session, true -} diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go b/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go deleted file mode 100644 index cdfe82a2..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go +++ /dev/null @@ -1,150 +0,0 @@ -package qtls - -import ( - "bytes" - "crypto/tls" - "fmt" - "net" - - "github.com/quic-go/quic-go/internal/protocol" -) - -func SetupConfigForServer( - conf *tls.Config, - localAddr, remoteAddr net.Addr, - getData func() []byte, - handleSessionTicket func([]byte, bool) bool, -) *tls.Config { - // Workaround for https://github.com/golang/go/issues/60506. - // This initializes the session tickets _before_ cloning the config. 
- _, _ = conf.DecryptTicket(nil, tls.ConnectionState{}) - - conf = conf.Clone() - conf.MinVersion = tls.VersionTLS13 - - // add callbacks to save transport parameters into the session ticket - origWrapSession := conf.WrapSession - conf.WrapSession = func(cs tls.ConnectionState, state *tls.SessionState) ([]byte, error) { - // Add QUIC session ticket - state.Extra = append(state.Extra, addExtraPrefix(getData())) - - if origWrapSession != nil { - return origWrapSession(cs, state) - } - b, err := conf.EncryptTicket(cs, state) - return b, err - } - origUnwrapSession := conf.UnwrapSession - // UnwrapSession might be called multiple times, as the client can use multiple session tickets. - // However, using 0-RTT is only possible with the first session ticket. - // crypto/tls guarantees that this callback is called in the same order as the session ticket in the ClientHello. - var unwrapCount int - conf.UnwrapSession = func(identity []byte, connState tls.ConnectionState) (*tls.SessionState, error) { - unwrapCount++ - var state *tls.SessionState - var err error - if origUnwrapSession != nil { - state, err = origUnwrapSession(identity, connState) - } else { - state, err = conf.DecryptTicket(identity, connState) - } - if err != nil || state == nil { - return nil, err - } - - extra := findExtraData(state.Extra) - if extra != nil { - state.EarlyData = handleSessionTicket(extra, state.EarlyData && unwrapCount == 1) - } else { - state.EarlyData = false - } - - return state, nil - } - // The tls.Config contains two callbacks that pass in a tls.ClientHelloInfo. - // Since crypto/tls doesn't do it, we need to make sure to set the Conn field with a fake net.Conn - // that allows the caller to get the local and the remote address. - if conf.GetConfigForClient != nil { - gcfc := conf.GetConfigForClient - conf.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) { - info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr} - c, err := gcfc(info) - if c != nil { - // We're returning a tls.Config here, so we need to apply this recursively. 
- c = SetupConfigForServer(c, localAddr, remoteAddr, getData, handleSessionTicket) - } - return c, err - } - } - if conf.GetCertificate != nil { - gc := conf.GetCertificate - conf.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { - info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr} - return gc(info) - } - } - return conf -} - -func SetupConfigForClient( - qconf *tls.QUICConfig, - getData func(earlyData bool) []byte, - setData func(data []byte, earlyData bool) (allowEarlyData bool), -) { - conf := qconf.TLSConfig - if conf.ClientSessionCache != nil { - origCache := conf.ClientSessionCache - conf.ClientSessionCache = &clientSessionCache{ - wrapped: origCache, - getData: getData, - setData: setData, - } - } -} - -func ToTLSEncryptionLevel(e protocol.EncryptionLevel) tls.QUICEncryptionLevel { - switch e { - case protocol.EncryptionInitial: - return tls.QUICEncryptionLevelInitial - case protocol.EncryptionHandshake: - return tls.QUICEncryptionLevelHandshake - case protocol.Encryption1RTT: - return tls.QUICEncryptionLevelApplication - case protocol.Encryption0RTT: - return tls.QUICEncryptionLevelEarly - default: - panic(fmt.Sprintf("unexpected encryption level: %s", e)) - } -} - -func FromTLSEncryptionLevel(e tls.QUICEncryptionLevel) protocol.EncryptionLevel { - switch e { - case tls.QUICEncryptionLevelInitial: - return protocol.EncryptionInitial - case tls.QUICEncryptionLevelHandshake: - return protocol.EncryptionHandshake - case tls.QUICEncryptionLevelApplication: - return protocol.Encryption1RTT - case tls.QUICEncryptionLevelEarly: - return protocol.Encryption0RTT - default: - panic(fmt.Sprintf("unexpect encryption level: %s", e)) - } -} - -const extraPrefix = "quic-go1" - -func addExtraPrefix(b []byte) []byte { - return append([]byte(extraPrefix), b...) -} - -func findExtraData(extras [][]byte) []byte { - prefix := []byte(extraPrefix) - for _, extra := range extras { - if len(extra) < len(prefix) || !bytes.Equal(prefix, extra[:len(prefix)]) { - continue - } - return extra[len(prefix):] - } - return nil -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go b/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go index b5b9d6fc..80ebfecb 100644 --- a/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go +++ b/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go @@ -19,7 +19,7 @@ func NewBufferedWriteCloser(writer *bufio.Writer, closer io.Closer) io.WriteClos } func (h bufferedWriteCloser) Close() error { - if err := h.Writer.Flush(); err != nil { + if err := h.Flush(); err != nil { return err } return h.Closer.Close() diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go deleted file mode 100644 index a9b715e2..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go +++ /dev/null @@ -1,21 +0,0 @@ -package utils - -import ( - "bytes" - "io" -) - -// A ByteOrder specifies how to convert byte sequences into 16-, 32-, or 64-bit unsigned integers. 
-type ByteOrder interface { - Uint32([]byte) uint32 - Uint24([]byte) uint32 - Uint16([]byte) uint16 - - ReadUint32(io.ByteReader) (uint32, error) - ReadUint24(io.ByteReader) (uint32, error) - ReadUint16(io.ByteReader) (uint16, error) - - WriteUint32(*bytes.Buffer, uint32) - WriteUint24(*bytes.Buffer, uint32) - WriteUint16(*bytes.Buffer, uint16) -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go deleted file mode 100644 index 834a711b..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go +++ /dev/null @@ -1,103 +0,0 @@ -package utils - -import ( - "bytes" - "encoding/binary" - "io" -) - -// BigEndian is the big-endian implementation of ByteOrder. -var BigEndian ByteOrder = bigEndian{} - -type bigEndian struct{} - -var _ ByteOrder = &bigEndian{} - -// ReadUintN reads N bytes -func (bigEndian) ReadUintN(b io.ByteReader, length uint8) (uint64, error) { - var res uint64 - for i := uint8(0); i < length; i++ { - bt, err := b.ReadByte() - if err != nil { - return 0, err - } - res ^= uint64(bt) << ((length - 1 - i) * 8) - } - return res, nil -} - -// ReadUint32 reads a uint32 -func (bigEndian) ReadUint32(b io.ByteReader) (uint32, error) { - var b1, b2, b3, b4 uint8 - var err error - if b4, err = b.ReadByte(); err != nil { - return 0, err - } - if b3, err = b.ReadByte(); err != nil { - return 0, err - } - if b2, err = b.ReadByte(); err != nil { - return 0, err - } - if b1, err = b.ReadByte(); err != nil { - return 0, err - } - return uint32(b1) + uint32(b2)<<8 + uint32(b3)<<16 + uint32(b4)<<24, nil -} - -// ReadUint24 reads a uint24 -func (bigEndian) ReadUint24(b io.ByteReader) (uint32, error) { - var b1, b2, b3 uint8 - var err error - if b3, err = b.ReadByte(); err != nil { - return 0, err - } - if b2, err = b.ReadByte(); err != nil { - return 0, err - } - if b1, err = b.ReadByte(); err != nil { - return 0, err - } - return uint32(b1) + uint32(b2)<<8 + uint32(b3)<<16, nil -} - -// ReadUint16 reads a uint16 -func (bigEndian) ReadUint16(b io.ByteReader) (uint16, error) { - var b1, b2 uint8 - var err error - if b2, err = b.ReadByte(); err != nil { - return 0, err - } - if b1, err = b.ReadByte(); err != nil { - return 0, err - } - return uint16(b1) + uint16(b2)<<8, nil -} - -func (bigEndian) Uint32(b []byte) uint32 { - return binary.BigEndian.Uint32(b) -} - -func (bigEndian) Uint24(b []byte) uint32 { - _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 -} - -func (bigEndian) Uint16(b []byte) uint16 { - return binary.BigEndian.Uint16(b) -} - -// WriteUint32 writes a uint32 -func (bigEndian) WriteUint32(b *bytes.Buffer, i uint32) { - b.Write([]byte{uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i)}) -} - -// WriteUint24 writes a uint24 -func (bigEndian) WriteUint24(b *bytes.Buffer, i uint32) { - b.Write([]byte{uint8(i >> 16), uint8(i >> 8), uint8(i)}) -} - -// WriteUint16 writes a uint16 -func (bigEndian) WriteUint16(b *bytes.Buffer, i uint16) { - b.Write([]byte{uint8(i >> 8), uint8(i)}) -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/ip.go b/vendor/github.com/quic-go/quic-go/internal/utils/ip.go deleted file mode 100644 index 7ac7ffec..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/utils/ip.go +++ /dev/null @@ -1,10 +0,0 @@ -package utils - -import "net" - -func IsIPv4(ip net.IP) bool { - // If ip is not an IPv4 address, To4 returns nil. 
- // Note that there might be some corner cases, where this is not correct. - // See https://stackoverflow.com/questions/22751035/golang-distinguish-ipv4-ipv6. - return ip.To4() != nil -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go b/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go deleted file mode 100644 index 03a9c9a8..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go +++ /dev/null @@ -1,36 +0,0 @@ -package utils - -import ( - "math" - "time" -) - -// InfDuration is a duration of infinite length -const InfDuration = time.Duration(math.MaxInt64) - -// MinNonZeroDuration return the minimum duration that's not zero. -func MinNonZeroDuration(a, b time.Duration) time.Duration { - if a == 0 { - return b - } - if b == 0 { - return a - } - return min(a, b) -} - -// MinTime returns the earlier time -func MinTime(a, b time.Time) time.Time { - if a.After(b) { - return b - } - return a -} - -// MaxTime returns the later time -func MaxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go index 463b9542..0efd8354 100644 --- a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go +++ b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go @@ -27,11 +27,6 @@ type RTTStats struct { maxAckDelay time.Duration } -// NewRTTStats makes a properly initialized RTTStats object -func NewRTTStats() *RTTStats { - return &RTTStats{} -} - // MinRTT Returns the minRTT for the entire connection. // May return Zero if no valid updates have occurred. func (r *RTTStats) MinRTT() time.Duration { return r.minRTT } @@ -63,8 +58,8 @@ func (r *RTTStats) PTO(includeMaxAckDelay bool) time.Duration { } // UpdateRTT updates the RTT based on a new sample. -func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) { - if sendDelta == InfDuration || sendDelta <= 0 { +func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration) { + if sendDelta <= 0 { return } @@ -114,18 +109,11 @@ func (r *RTTStats) SetInitialRTT(t time.Duration) { r.latestRTT = t } -// OnConnectionMigration is called when connection migrates and rtt measurement needs to be reset. -func (r *RTTStats) OnConnectionMigration() { - r.latestRTT = 0 +func (r *RTTStats) ResetForPathMigration() { + r.hasMeasurement = false r.minRTT = 0 + r.latestRTT = 0 r.smoothedRTT = 0 r.meanDeviation = 0 -} - -// ExpireSmoothedMetrics causes the smoothed_rtt to be increased to the latest_rtt if the latest_rtt -// is larger. The mean deviation is increased to the most recent deviation if -// it's larger. 
-func (r *RTTStats) ExpireSmoothedMetrics() { - r.meanDeviation = max(r.meanDeviation, (r.smoothedRTT - r.latestRTT).Abs()) - r.smoothedRTT = max(r.smoothedRTT, r.latestRTT) + // max_ack_delay remains valid } diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go index e0f2db3c..8befef4f 100644 --- a/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go @@ -2,11 +2,11 @@ package wire import ( "errors" + "math" "sort" "time" "github.com/quic-go/quic-go/internal/protocol" - "github.com/quic-go/quic-go/internal/utils" "github.com/quic-go/quic-go/quicvarint" ) @@ -40,7 +40,7 @@ func parseAckFrame(frame *AckFrame, b []byte, typ uint64, ackDelayExponent uint8 delayTime := time.Duration(delay*1< seq { - //nolint:stylecheck + //nolint:staticcheck // SA1021: Retire Prior To is the name of the field return nil, 0, fmt.Errorf("Retire Prior To value (%d) larger than Sequence Number (%d)", ret, seq) } if len(b) == 0 { diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go b/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go index 69aa8341..cf2889c5 100644 --- a/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go @@ -2,7 +2,6 @@ package wire import ( "errors" - "fmt" "io" "github.com/quic-go/quic-go/internal/protocol" @@ -28,25 +27,15 @@ func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.Packet } pos := 1 + connIDLen - var pn protocol.PacketNumber - switch pnLen { - case protocol.PacketNumberLen1: - pn = protocol.PacketNumber(data[pos]) - case protocol.PacketNumberLen2: - pn = protocol.PacketNumber(utils.BigEndian.Uint16(data[pos : pos+2])) - case protocol.PacketNumberLen3: - pn = protocol.PacketNumber(utils.BigEndian.Uint24(data[pos : pos+3])) - case protocol.PacketNumberLen4: - pn = protocol.PacketNumber(utils.BigEndian.Uint32(data[pos : pos+4])) - default: - return 0, 0, 0, 0, fmt.Errorf("invalid packet number length: %d", pnLen) + pn, err := readPacketNumber(data[pos:], pnLen) + if err != nil { + return 0, 0, 0, 0, err } kp := protocol.KeyPhaseZero if data[0]&0b100 > 0 { kp = protocol.KeyPhaseOne } - var err error if data[0]&0x18 != 0 { err = ErrInvalidReservedBits } diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go index f9470ecd..cdc32722 100644 --- a/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go @@ -58,7 +58,10 @@ func parseStreamFrame(b []byte, typ uint64, _ protocol.Version) (*StreamFrame, i var frame *StreamFrame if dataLen < protocol.MinStreamFrameBufferSize { - frame = &StreamFrame{Data: make([]byte, dataLen)} + frame = &StreamFrame{} + if dataLen > 0 { + frame.Data = make([]byte, dataLen) + } } else { frame = GetStreamFrame() // The STREAM frame can't be larger than the StreamFrame we obtained from the buffer, @@ -74,7 +77,7 @@ func parseStreamFrame(b []byte, typ uint64, _ protocol.Version) (*StreamFrame, i frame.Fin = fin frame.DataLenPresent = hasDataLen - if dataLen != 0 { + if dataLen > 0 { copy(frame.Data, b) } if frame.Offset+frame.DataLen() > protocol.MaxByteCount { diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go 
b/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go index cee74b8f..d39189a0 100644 --- a/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go @@ -245,11 +245,15 @@ func (p *TransportParameters) readPreferredAddress(b []byte, expectedLen int) er copy(ipv4[:], b[:4]) port4 := binary.BigEndian.Uint16(b[4:]) b = b[4+2:] - pa.IPv4 = netip.AddrPortFrom(netip.AddrFrom4(ipv4), port4) + if port4 != 0 && ipv4 != [4]byte{} { + pa.IPv4 = netip.AddrPortFrom(netip.AddrFrom4(ipv4), port4) + } var ipv6 [16]byte copy(ipv6[:], b[:16]) port6 := binary.BigEndian.Uint16(b[16:]) - pa.IPv6 = netip.AddrPortFrom(netip.AddrFrom16(ipv6), port6) + if port6 != 0 && ipv6 != [16]byte{} { + pa.IPv6 = netip.AddrPortFrom(netip.AddrFrom16(ipv6), port6) + } b = b[16+2:] connIDLen := int(b[0]) b = b[1:] @@ -391,12 +395,20 @@ func (p *TransportParameters) Marshal(pers protocol.Perspective) []byte { if p.PreferredAddress != nil { b = quicvarint.Append(b, uint64(preferredAddressParameterID)) b = quicvarint.Append(b, 4+2+16+2+1+uint64(p.PreferredAddress.ConnectionID.Len())+16) - ip4 := p.PreferredAddress.IPv4.Addr().As4() - b = append(b, ip4[:]...) - b = binary.BigEndian.AppendUint16(b, p.PreferredAddress.IPv4.Port()) - ip6 := p.PreferredAddress.IPv6.Addr().As16() - b = append(b, ip6[:]...) - b = binary.BigEndian.AppendUint16(b, p.PreferredAddress.IPv6.Port()) + if p.PreferredAddress.IPv4.IsValid() { + ipv4 := p.PreferredAddress.IPv4.Addr().As4() + b = append(b, ipv4[:]...) + b = binary.BigEndian.AppendUint16(b, p.PreferredAddress.IPv4.Port()) + } else { + b = append(b, make([]byte, 6)...) + } + if p.PreferredAddress.IPv6.IsValid() { + ipv6 := p.PreferredAddress.IPv6.Addr().As16() + b = append(b, ipv6[:]...) + b = binary.BigEndian.AppendUint16(b, p.PreferredAddress.IPv6.Port()) + } else { + b = append(b, make([]byte, 18)...) + } b = append(b, uint8(p.PreferredAddress.ConnectionID.Len())) b = append(b, p.PreferredAddress.ConnectionID.Bytes()...) b = append(b, p.PreferredAddress.StatelessResetToken[:]...) 
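[Editor's note] The preferred_address changes above only marshal a real address when the netip.AddrPort is valid and otherwise emit six (IPv4) or eighteen (IPv6) zero bytes, while the parser checks the raw bytes and the port before constructing an AddrPort at all. A small sketch of the netip calls involved, with made-up addresses from the documentation range:

    package main

    import (
    	"fmt"
    	"net/netip"
    )

    func main() {
    	// The zero value means "no preferred address": IsValid reports false,
    	// so the marshaller writes zero bytes instead of an address.
    	var unset netip.AddrPort
    	fmt.Println(unset.IsValid()) // false

    	// A populated IPv4 preferred address, assembled the way the parser does it
    	// from four address bytes and a two-byte port.
    	ipv4 := netip.AddrPortFrom(netip.AddrFrom4([4]byte{192, 0, 2, 1}), 4433)
    	fmt.Println(ipv4.IsValid(), ipv4) // true 192.0.2.1:4433

    	// Note that 0.0.0.0:0 still counts as "valid" as far as netip is concerned,
    	// which is why the parser checks the address bytes and the port explicitly
    	// instead of relying on IsValid alone.
    	zero := netip.AddrPortFrom(netip.AddrFrom4([4]byte{}), 0)
    	fmt.Println(zero.IsValid()) // true
    }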
diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go b/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go index a551aa8c..407cb629 100644 --- a/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go @@ -16,11 +16,11 @@ func ParseVersionNegotiationPacket(b []byte) (dest, src protocol.ArbitraryLenCon } b = b[n:] if len(b) == 0 { - //nolint:stylecheck + //nolint:staticcheck // SA1021: the packet is called Version Negotiation packet return nil, nil, nil, errors.New("Version Negotiation packet has empty version list") } if len(b)%4 != 0 { - //nolint:stylecheck + //nolint:staticcheck // SA1021: the packet is called Version Negotiation packet return nil, nil, nil, errors.New("Version Negotiation packet has a version list with an invalid length") } versions := make([]protocol.Version, len(b)/4) diff --git a/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go b/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go index a354bbd9..f218e046 100644 --- a/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go +++ b/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go @@ -5,34 +5,36 @@ import ( "time" ) +//go:generate go run generate_multiplexer.go ConnectionTracer connection_tracer.go multiplexer.tmpl connection_tracer_multiplexer.go + // A ConnectionTracer records events. type ConnectionTracer struct { StartedConnection func(local, remote net.Addr, srcConnID, destConnID ConnectionID) - NegotiatedVersion func(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) - ClosedConnection func(error) - SentTransportParameters func(*TransportParameters) - ReceivedTransportParameters func(*TransportParameters) + NegotiatedVersion func(chosen Version, clientVersions, serverVersions []Version) + ClosedConnection func(err error) + SentTransportParameters func(parameters *TransportParameters) + ReceivedTransportParameters func(parameters *TransportParameters) RestoredTransportParameters func(parameters *TransportParameters) // for 0-RTT - SentLongHeaderPacket func(*ExtendedHeader, ByteCount, ECN, *AckFrame, []Frame) - SentShortHeaderPacket func(*ShortHeader, ByteCount, ECN, *AckFrame, []Frame) - ReceivedVersionNegotiationPacket func(dest, src ArbitraryLenConnectionID, _ []VersionNumber) - ReceivedRetry func(*Header) - ReceivedLongHeaderPacket func(*ExtendedHeader, ByteCount, ECN, []Frame) - ReceivedShortHeaderPacket func(*ShortHeader, ByteCount, ECN, []Frame) - BufferedPacket func(PacketType, ByteCount) - DroppedPacket func(PacketType, PacketNumber, ByteCount, PacketDropReason) + SentLongHeaderPacket func(hdr *ExtendedHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) + SentShortHeaderPacket func(hdr *ShortHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) + ReceivedVersionNegotiationPacket func(dest, src ArbitraryLenConnectionID, versions []Version) + ReceivedRetry func(hdr *Header) + ReceivedLongHeaderPacket func(hdr *ExtendedHeader, size ByteCount, ecn ECN, frames []Frame) + ReceivedShortHeaderPacket func(hdr *ShortHeader, size ByteCount, ecn ECN, frames []Frame) + BufferedPacket func(packetType PacketType, size ByteCount) + DroppedPacket func(packetType PacketType, pn PacketNumber, size ByteCount, reason PacketDropReason) UpdatedMetrics func(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) - AcknowledgedPacket func(EncryptionLevel, PacketNumber) - LostPacket 
func(EncryptionLevel, PacketNumber, PacketLossReason) + AcknowledgedPacket func(encLevel EncryptionLevel, pn PacketNumber) + LostPacket func(encLevel EncryptionLevel, pn PacketNumber, reason PacketLossReason) UpdatedMTU func(mtu ByteCount, done bool) - UpdatedCongestionState func(CongestionState) + UpdatedCongestionState func(state CongestionState) UpdatedPTOCount func(value uint32) - UpdatedKeyFromTLS func(EncryptionLevel, Perspective) + UpdatedKeyFromTLS func(encLevel EncryptionLevel, p Perspective) UpdatedKey func(keyPhase KeyPhase, remote bool) - DroppedEncryptionLevel func(EncryptionLevel) + DroppedEncryptionLevel func(encLevel EncryptionLevel) DroppedKey func(keyPhase KeyPhase) - SetLossTimer func(TimerType, EncryptionLevel, time.Time) - LossTimerExpired func(TimerType, EncryptionLevel) + SetLossTimer func(timerType TimerType, encLevel EncryptionLevel, time time.Time) + LossTimerExpired func(timerType TimerType, encLevel EncryptionLevel) LossTimerCanceled func() ECNStateUpdated func(state ECNState, trigger ECNStateTrigger) ChoseALPN func(protocol string) @@ -40,232 +42,3 @@ type ConnectionTracer struct { Close func() Debug func(name, msg string) } - -// NewMultiplexedConnectionTracer creates a new connection tracer that multiplexes events to multiple tracers. -func NewMultiplexedConnectionTracer(tracers ...*ConnectionTracer) *ConnectionTracer { - if len(tracers) == 0 { - return nil - } - if len(tracers) == 1 { - return tracers[0] - } - return &ConnectionTracer{ - StartedConnection: func(local, remote net.Addr, srcConnID, destConnID ConnectionID) { - for _, t := range tracers { - if t.StartedConnection != nil { - t.StartedConnection(local, remote, srcConnID, destConnID) - } - } - }, - NegotiatedVersion: func(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) { - for _, t := range tracers { - if t.NegotiatedVersion != nil { - t.NegotiatedVersion(chosen, clientVersions, serverVersions) - } - } - }, - ClosedConnection: func(e error) { - for _, t := range tracers { - if t.ClosedConnection != nil { - t.ClosedConnection(e) - } - } - }, - SentTransportParameters: func(tp *TransportParameters) { - for _, t := range tracers { - if t.SentTransportParameters != nil { - t.SentTransportParameters(tp) - } - } - }, - ReceivedTransportParameters: func(tp *TransportParameters) { - for _, t := range tracers { - if t.ReceivedTransportParameters != nil { - t.ReceivedTransportParameters(tp) - } - } - }, - RestoredTransportParameters: func(tp *TransportParameters) { - for _, t := range tracers { - if t.RestoredTransportParameters != nil { - t.RestoredTransportParameters(tp) - } - } - }, - SentLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { - for _, t := range tracers { - if t.SentLongHeaderPacket != nil { - t.SentLongHeaderPacket(hdr, size, ecn, ack, frames) - } - } - }, - SentShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { - for _, t := range tracers { - if t.SentShortHeaderPacket != nil { - t.SentShortHeaderPacket(hdr, size, ecn, ack, frames) - } - } - }, - ReceivedVersionNegotiationPacket: func(dest, src ArbitraryLenConnectionID, versions []VersionNumber) { - for _, t := range tracers { - if t.ReceivedVersionNegotiationPacket != nil { - t.ReceivedVersionNegotiationPacket(dest, src, versions) - } - } - }, - ReceivedRetry: func(hdr *Header) { - for _, t := range tracers { - if t.ReceivedRetry != nil { - t.ReceivedRetry(hdr) - } - } - }, - ReceivedLongHeaderPacket: func(hdr 
*ExtendedHeader, size ByteCount, ecn ECN, frames []Frame) { - for _, t := range tracers { - if t.ReceivedLongHeaderPacket != nil { - t.ReceivedLongHeaderPacket(hdr, size, ecn, frames) - } - } - }, - ReceivedShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, frames []Frame) { - for _, t := range tracers { - if t.ReceivedShortHeaderPacket != nil { - t.ReceivedShortHeaderPacket(hdr, size, ecn, frames) - } - } - }, - BufferedPacket: func(typ PacketType, size ByteCount) { - for _, t := range tracers { - if t.BufferedPacket != nil { - t.BufferedPacket(typ, size) - } - } - }, - DroppedPacket: func(typ PacketType, pn PacketNumber, size ByteCount, reason PacketDropReason) { - for _, t := range tracers { - if t.DroppedPacket != nil { - t.DroppedPacket(typ, pn, size, reason) - } - } - }, - UpdatedMetrics: func(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) { - for _, t := range tracers { - if t.UpdatedMetrics != nil { - t.UpdatedMetrics(rttStats, cwnd, bytesInFlight, packetsInFlight) - } - } - }, - AcknowledgedPacket: func(encLevel EncryptionLevel, pn PacketNumber) { - for _, t := range tracers { - if t.AcknowledgedPacket != nil { - t.AcknowledgedPacket(encLevel, pn) - } - } - }, - LostPacket: func(encLevel EncryptionLevel, pn PacketNumber, reason PacketLossReason) { - for _, t := range tracers { - if t.LostPacket != nil { - t.LostPacket(encLevel, pn, reason) - } - } - }, - UpdatedMTU: func(mtu ByteCount, done bool) { - for _, t := range tracers { - if t.UpdatedMTU != nil { - t.UpdatedMTU(mtu, done) - } - } - }, - UpdatedCongestionState: func(state CongestionState) { - for _, t := range tracers { - if t.UpdatedCongestionState != nil { - t.UpdatedCongestionState(state) - } - } - }, - UpdatedPTOCount: func(value uint32) { - for _, t := range tracers { - if t.UpdatedPTOCount != nil { - t.UpdatedPTOCount(value) - } - } - }, - UpdatedKeyFromTLS: func(encLevel EncryptionLevel, perspective Perspective) { - for _, t := range tracers { - if t.UpdatedKeyFromTLS != nil { - t.UpdatedKeyFromTLS(encLevel, perspective) - } - } - }, - UpdatedKey: func(generation KeyPhase, remote bool) { - for _, t := range tracers { - if t.UpdatedKey != nil { - t.UpdatedKey(generation, remote) - } - } - }, - DroppedEncryptionLevel: func(encLevel EncryptionLevel) { - for _, t := range tracers { - if t.DroppedEncryptionLevel != nil { - t.DroppedEncryptionLevel(encLevel) - } - } - }, - DroppedKey: func(generation KeyPhase) { - for _, t := range tracers { - if t.DroppedKey != nil { - t.DroppedKey(generation) - } - } - }, - SetLossTimer: func(typ TimerType, encLevel EncryptionLevel, exp time.Time) { - for _, t := range tracers { - if t.SetLossTimer != nil { - t.SetLossTimer(typ, encLevel, exp) - } - } - }, - LossTimerExpired: func(typ TimerType, encLevel EncryptionLevel) { - for _, t := range tracers { - if t.LossTimerExpired != nil { - t.LossTimerExpired(typ, encLevel) - } - } - }, - LossTimerCanceled: func() { - for _, t := range tracers { - if t.LossTimerCanceled != nil { - t.LossTimerCanceled() - } - } - }, - ECNStateUpdated: func(state ECNState, trigger ECNStateTrigger) { - for _, t := range tracers { - if t.ECNStateUpdated != nil { - t.ECNStateUpdated(state, trigger) - } - } - }, - ChoseALPN: func(protocol string) { - for _, t := range tracers { - if t.ChoseALPN != nil { - t.ChoseALPN(protocol) - } - } - }, - Close: func() { - for _, t := range tracers { - if t.Close != nil { - t.Close() - } - } - }, - Debug: func(name, msg string) { - for _, t := range tracers { - if t.Debug != nil { - 
t.Debug(name, msg) - } - } - }, - } -} diff --git a/vendor/github.com/quic-go/quic-go/logging/connection_tracer_multiplexer.go b/vendor/github.com/quic-go/quic-go/logging/connection_tracer_multiplexer.go new file mode 100644 index 00000000..3a87058c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/connection_tracer_multiplexer.go @@ -0,0 +1,236 @@ +// Code generated by generate_multiplexer.go; DO NOT EDIT. + +package logging + +import ( + "net" + "time" +) + +func NewMultiplexedConnectionTracer(tracers ...*ConnectionTracer) *ConnectionTracer { + if len(tracers) == 0 { + return nil + } + if len(tracers) == 1 { + return tracers[0] + } + return &ConnectionTracer{ + StartedConnection: func(local net.Addr, remote net.Addr, srcConnID ConnectionID, destConnID ConnectionID) { + for _, t := range tracers { + if t.StartedConnection != nil { + t.StartedConnection(local, remote, srcConnID, destConnID) + } + } + }, + NegotiatedVersion: func(chosen Version, clientVersions []Version, serverVersions []Version) { + for _, t := range tracers { + if t.NegotiatedVersion != nil { + t.NegotiatedVersion(chosen, clientVersions, serverVersions) + } + } + }, + ClosedConnection: func(err error) { + for _, t := range tracers { + if t.ClosedConnection != nil { + t.ClosedConnection(err) + } + } + }, + SentTransportParameters: func(parameters *TransportParameters) { + for _, t := range tracers { + if t.SentTransportParameters != nil { + t.SentTransportParameters(parameters) + } + } + }, + ReceivedTransportParameters: func(parameters *TransportParameters) { + for _, t := range tracers { + if t.ReceivedTransportParameters != nil { + t.ReceivedTransportParameters(parameters) + } + } + }, + RestoredTransportParameters: func(parameters *TransportParameters) { + for _, t := range tracers { + if t.RestoredTransportParameters != nil { + t.RestoredTransportParameters(parameters) + } + } + }, + SentLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { + for _, t := range tracers { + if t.SentLongHeaderPacket != nil { + t.SentLongHeaderPacket(hdr, size, ecn, ack, frames) + } + } + }, + SentShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { + for _, t := range tracers { + if t.SentShortHeaderPacket != nil { + t.SentShortHeaderPacket(hdr, size, ecn, ack, frames) + } + } + }, + ReceivedVersionNegotiationPacket: func(dest ArbitraryLenConnectionID, src ArbitraryLenConnectionID, versions []Version) { + for _, t := range tracers { + if t.ReceivedVersionNegotiationPacket != nil { + t.ReceivedVersionNegotiationPacket(dest, src, versions) + } + } + }, + ReceivedRetry: func(hdr *Header) { + for _, t := range tracers { + if t.ReceivedRetry != nil { + t.ReceivedRetry(hdr) + } + } + }, + ReceivedLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, frames []Frame) { + for _, t := range tracers { + if t.ReceivedLongHeaderPacket != nil { + t.ReceivedLongHeaderPacket(hdr, size, ecn, frames) + } + } + }, + ReceivedShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, frames []Frame) { + for _, t := range tracers { + if t.ReceivedShortHeaderPacket != nil { + t.ReceivedShortHeaderPacket(hdr, size, ecn, frames) + } + } + }, + BufferedPacket: func(packetType PacketType, size ByteCount) { + for _, t := range tracers { + if t.BufferedPacket != nil { + t.BufferedPacket(packetType, size) + } + } + }, + DroppedPacket: func(packetType PacketType, pn PacketNumber, size ByteCount, reason PacketDropReason) { + for _, t := 
range tracers { + if t.DroppedPacket != nil { + t.DroppedPacket(packetType, pn, size, reason) + } + } + }, + UpdatedMetrics: func(rttStats *RTTStats, cwnd ByteCount, bytesInFlight ByteCount, packetsInFlight int) { + for _, t := range tracers { + if t.UpdatedMetrics != nil { + t.UpdatedMetrics(rttStats, cwnd, bytesInFlight, packetsInFlight) + } + } + }, + AcknowledgedPacket: func(encLevel EncryptionLevel, pn PacketNumber) { + for _, t := range tracers { + if t.AcknowledgedPacket != nil { + t.AcknowledgedPacket(encLevel, pn) + } + } + }, + LostPacket: func(encLevel EncryptionLevel, pn PacketNumber, reason PacketLossReason) { + for _, t := range tracers { + if t.LostPacket != nil { + t.LostPacket(encLevel, pn, reason) + } + } + }, + UpdatedMTU: func(mtu ByteCount, done bool) { + for _, t := range tracers { + if t.UpdatedMTU != nil { + t.UpdatedMTU(mtu, done) + } + } + }, + UpdatedCongestionState: func(state CongestionState) { + for _, t := range tracers { + if t.UpdatedCongestionState != nil { + t.UpdatedCongestionState(state) + } + } + }, + UpdatedPTOCount: func(value uint32) { + for _, t := range tracers { + if t.UpdatedPTOCount != nil { + t.UpdatedPTOCount(value) + } + } + }, + UpdatedKeyFromTLS: func(encLevel EncryptionLevel, p Perspective) { + for _, t := range tracers { + if t.UpdatedKeyFromTLS != nil { + t.UpdatedKeyFromTLS(encLevel, p) + } + } + }, + UpdatedKey: func(keyPhase KeyPhase, remote bool) { + for _, t := range tracers { + if t.UpdatedKey != nil { + t.UpdatedKey(keyPhase, remote) + } + } + }, + DroppedEncryptionLevel: func(encLevel EncryptionLevel) { + for _, t := range tracers { + if t.DroppedEncryptionLevel != nil { + t.DroppedEncryptionLevel(encLevel) + } + } + }, + DroppedKey: func(keyPhase KeyPhase) { + for _, t := range tracers { + if t.DroppedKey != nil { + t.DroppedKey(keyPhase) + } + } + }, + SetLossTimer: func(timerType TimerType, encLevel EncryptionLevel, time time.Time) { + for _, t := range tracers { + if t.SetLossTimer != nil { + t.SetLossTimer(timerType, encLevel, time) + } + } + }, + LossTimerExpired: func(timerType TimerType, encLevel EncryptionLevel) { + for _, t := range tracers { + if t.LossTimerExpired != nil { + t.LossTimerExpired(timerType, encLevel) + } + } + }, + LossTimerCanceled: func() { + for _, t := range tracers { + if t.LossTimerCanceled != nil { + t.LossTimerCanceled() + } + } + }, + ECNStateUpdated: func(state ECNState, trigger ECNStateTrigger) { + for _, t := range tracers { + if t.ECNStateUpdated != nil { + t.ECNStateUpdated(state, trigger) + } + } + }, + ChoseALPN: func(protocol string) { + for _, t := range tracers { + if t.ChoseALPN != nil { + t.ChoseALPN(protocol) + } + } + }, + Close: func() { + for _, t := range tracers { + if t.Close != nil { + t.Close() + } + } + }, + Debug: func(name string, msg string) { + for _, t := range tracers { + if t.Debug != nil { + t.Debug(name, msg) + } + } + }, + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/generate_multiplexer.go b/vendor/github.com/quic-go/quic-go/logging/generate_multiplexer.go new file mode 100644 index 00000000..c152b846 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/generate_multiplexer.go @@ -0,0 +1,161 @@ +//go:build generate + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "strings" + "text/template" + + "golang.org/x/tools/imports" +) + +func main() { + if len(os.Args) != 5 { + log.Fatalf("Usage: %s ", os.Args[0]) + } + + structName := os.Args[1] + inputFile := os.Args[2] + 
templateFile := os.Args[3] + outputFile := os.Args[4] + + fset := token.NewFileSet() + + // Parse the input file containing the struct type + file, err := parser.ParseFile(fset, inputFile, nil, parser.AllErrors) + if err != nil { + log.Fatalf("Failed to parse file: %v", err) + } + + var fields []*ast.Field + + // Find the specified struct type in the AST + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.TYPE { + continue + } + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok || typeSpec.Name.Name != structName { + continue + } + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + log.Fatalf("%s is not a struct", structName) + } + fields = structType.Fields.List + break + } + } + + if fields == nil { + log.Fatalf("Could not find %s type", structName) + } + + // Prepare data for the template + type FieldData struct { + Name string + Params string + Args string + HasParams bool + ReturnTypes string + HasReturn bool + } + + var fieldDataList []FieldData + + for _, field := range fields { + funcType, ok := field.Type.(*ast.FuncType) + if !ok { + continue + } + for _, name := range field.Names { + fieldData := FieldData{Name: name.Name} + + // extract parameters + var params []string + var args []string + if funcType.Params != nil { + for i, param := range funcType.Params.List { + // We intentionally reject unnamed (and, further down, "_") function parameters. + // We could auto-generate parameter names, + // but having meaningful variable names will be more helpful for the user. + if len(param.Names) == 0 { + log.Fatalf("encountered unnamed parameter at position %d in function %s", i, fieldData.Name) + } + var buf bytes.Buffer + printer.Fprint(&buf, fset, param.Type) + paramType := buf.String() + for _, paramName := range param.Names { + if paramName.Name == "_" { + log.Fatalf("encountered underscore parameter at position %d in function %s", i, fieldData.Name) + } + params = append(params, fmt.Sprintf("%s %s", paramName.Name, paramType)) + args = append(args, paramName.Name) + } + } + } + fieldData.Params = strings.Join(params, ", ") + fieldData.Args = strings.Join(args, ", ") + fieldData.HasParams = len(params) > 0 + + // extract return types + if funcType.Results != nil && len(funcType.Results.List) > 0 { + fieldData.HasReturn = true + var returns []string + for _, result := range funcType.Results.List { + var buf bytes.Buffer + printer.Fprint(&buf, fset, result.Type) + returns = append(returns, buf.String()) + } + if len(returns) == 1 { + fieldData.ReturnTypes = fmt.Sprintf(" %s", returns[0]) + } else { + fieldData.ReturnTypes = fmt.Sprintf(" (%s)", strings.Join(returns, ", ")) + } + } + + fieldDataList = append(fieldDataList, fieldData) + } + } + + // Read the template from file + templateContent, err := os.ReadFile(templateFile) + if err != nil { + log.Fatalf("Failed to read template file: %v", err) + } + + // Generate the code using the template + tmpl, err := template.New("multiplexer").Funcs(template.FuncMap{"join": strings.Join}).Parse(string(templateContent)) + if err != nil { + log.Fatalf("Failed to parse template: %v", err) + } + + var generatedCode bytes.Buffer + generatedCode.WriteString("// Code generated by generate_multiplexer.go; DO NOT EDIT.\n\n") + if err = tmpl.Execute(&generatedCode, map[string]interface{}{ + "Fields": fieldDataList, + "StructName": structName, + }); err != nil { + log.Fatalf("Failed to execute template: %v", err) + } + + // Format the generated code and 
add imports + formattedCode, err := imports.Process(outputFile, generatedCode.Bytes(), nil) + if err != nil { + log.Fatalf("Failed to process imports: %v", err) + } + + if err := os.WriteFile(outputFile, formattedCode, 0o644); err != nil { + log.Fatalf("Failed to write output file: %v", err) + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/interface.go b/vendor/github.com/quic-go/quic-go/logging/interface.go index a618a189..1f8edb92 100644 --- a/vendor/github.com/quic-go/quic-go/logging/interface.go +++ b/vendor/github.com/quic-go/quic-go/logging/interface.go @@ -36,8 +36,8 @@ type ( StreamNum = protocol.StreamNum // The StreamType is the type of the stream (unidirectional or bidirectional). StreamType = protocol.StreamType - // The VersionNumber is the QUIC version. - VersionNumber = protocol.Version + // The Version is the QUIC version. + Version = protocol.Version // The Header is the QUIC packet header, before removing header protection. Header = wire.Header @@ -72,27 +72,27 @@ const ( const ( // KeyPhaseZero is key phase bit 0 - KeyPhaseZero KeyPhaseBit = protocol.KeyPhaseZero + KeyPhaseZero = protocol.KeyPhaseZero // KeyPhaseOne is key phase bit 1 - KeyPhaseOne KeyPhaseBit = protocol.KeyPhaseOne + KeyPhaseOne = protocol.KeyPhaseOne ) const ( // PerspectiveServer is used for a QUIC server - PerspectiveServer Perspective = protocol.PerspectiveServer + PerspectiveServer = protocol.PerspectiveServer // PerspectiveClient is used for a QUIC client - PerspectiveClient Perspective = protocol.PerspectiveClient + PerspectiveClient = protocol.PerspectiveClient ) const ( // EncryptionInitial is the Initial encryption level - EncryptionInitial EncryptionLevel = protocol.EncryptionInitial + EncryptionInitial = protocol.EncryptionInitial // EncryptionHandshake is the Handshake encryption level - EncryptionHandshake EncryptionLevel = protocol.EncryptionHandshake + EncryptionHandshake = protocol.EncryptionHandshake // Encryption1RTT is the 1-RTT encryption level - Encryption1RTT EncryptionLevel = protocol.Encryption1RTT + Encryption1RTT = protocol.Encryption1RTT // Encryption0RTT is the 0-RTT encryption level - Encryption0RTT EncryptionLevel = protocol.Encryption0RTT + Encryption0RTT = protocol.Encryption0RTT ) const ( diff --git a/vendor/github.com/quic-go/quic-go/logging/multiplexer.tmpl b/vendor/github.com/quic-go/quic-go/logging/multiplexer.tmpl new file mode 100644 index 00000000..9ba52e0f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/multiplexer.tmpl @@ -0,0 +1,21 @@ +package logging + +func NewMultiplexed{{ .StructName }} (tracers ...*{{ .StructName }}) *{{ .StructName }} { + if len(tracers) == 0 { + return nil + } + if len(tracers) == 1 { + return tracers[0] + } + return &{{ .StructName }}{ + {{- range .Fields }} + {{ .Name }}: func({{ .Params }}){{ .ReturnTypes }} { + for _, t := range tracers { + if t.{{ .Name }} != nil { + t.{{ .Name }}({{ .Args }}) + } + } + }, + {{- end }} + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/tracer.go b/vendor/github.com/quic-go/quic-go/logging/tracer.go index edd85dba..4fe01462 100644 --- a/vendor/github.com/quic-go/quic-go/logging/tracer.go +++ b/vendor/github.com/quic-go/quic-go/logging/tracer.go @@ -2,58 +2,13 @@ package logging import "net" +//go:generate go run generate_multiplexer.go Tracer tracer.go multiplexer.tmpl tracer_multiplexer.go + // A Tracer traces events. 
type Tracer struct { - SentPacket func(net.Addr, *Header, ByteCount, []Frame) - SentVersionNegotiationPacket func(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []VersionNumber) - DroppedPacket func(net.Addr, PacketType, ByteCount, PacketDropReason) + SentPacket func(dest net.Addr, hdr *Header, size ByteCount, frames []Frame) + SentVersionNegotiationPacket func(dest net.Addr, destConnID, srcConnID ArbitraryLenConnectionID, versions []Version) + DroppedPacket func(addr net.Addr, packetType PacketType, size ByteCount, reason PacketDropReason) Debug func(name, msg string) Close func() } - -// NewMultiplexedTracer creates a new tracer that multiplexes events to multiple tracers. -func NewMultiplexedTracer(tracers ...*Tracer) *Tracer { - if len(tracers) == 0 { - return nil - } - if len(tracers) == 1 { - return tracers[0] - } - return &Tracer{ - SentPacket: func(remote net.Addr, hdr *Header, size ByteCount, frames []Frame) { - for _, t := range tracers { - if t.SentPacket != nil { - t.SentPacket(remote, hdr, size, frames) - } - } - }, - SentVersionNegotiationPacket: func(remote net.Addr, dest, src ArbitraryLenConnectionID, versions []VersionNumber) { - for _, t := range tracers { - if t.SentVersionNegotiationPacket != nil { - t.SentVersionNegotiationPacket(remote, dest, src, versions) - } - } - }, - DroppedPacket: func(remote net.Addr, typ PacketType, size ByteCount, reason PacketDropReason) { - for _, t := range tracers { - if t.DroppedPacket != nil { - t.DroppedPacket(remote, typ, size, reason) - } - } - }, - Debug: func(name, msg string) { - for _, t := range tracers { - if t.Debug != nil { - t.Debug(name, msg) - } - } - }, - Close: func() { - for _, t := range tracers { - if t.Close != nil { - t.Close() - } - } - }, - } -} diff --git a/vendor/github.com/quic-go/quic-go/logging/tracer_multiplexer.go b/vendor/github.com/quic-go/quic-go/logging/tracer_multiplexer.go new file mode 100644 index 00000000..f0878cfe --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/tracer_multiplexer.go @@ -0,0 +1,51 @@ +// Code generated by generate_multiplexer.go; DO NOT EDIT. 
+ +package logging + +import "net" + +func NewMultiplexedTracer(tracers ...*Tracer) *Tracer { + if len(tracers) == 0 { + return nil + } + if len(tracers) == 1 { + return tracers[0] + } + return &Tracer{ + SentPacket: func(dest net.Addr, hdr *Header, size ByteCount, frames []Frame) { + for _, t := range tracers { + if t.SentPacket != nil { + t.SentPacket(dest, hdr, size, frames) + } + } + }, + SentVersionNegotiationPacket: func(dest net.Addr, destConnID ArbitraryLenConnectionID, srcConnID ArbitraryLenConnectionID, versions []Version) { + for _, t := range tracers { + if t.SentVersionNegotiationPacket != nil { + t.SentVersionNegotiationPacket(dest, destConnID, srcConnID, versions) + } + } + }, + DroppedPacket: func(addr net.Addr, packetType PacketType, size ByteCount, reason PacketDropReason) { + for _, t := range tracers { + if t.DroppedPacket != nil { + t.DroppedPacket(addr, packetType, size, reason) + } + } + }, + Debug: func(name string, msg string) { + for _, t := range tracers { + if t.Debug != nil { + t.Debug(name, msg) + } + } + }, + Close: func() { + for _, t := range tracers { + if t.Close != nil { + t.Close() + } + } + }, + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/types.go b/vendor/github.com/quic-go/quic-go/logging/types.go index 0d79b0a9..65da3559 100644 --- a/vendor/github.com/quic-go/quic-go/logging/types.go +++ b/vendor/github.com/quic-go/quic-go/logging/types.go @@ -63,9 +63,11 @@ type TimerType uint8 const ( // TimerTypeACK is the timer type for the early retransmit timer - TimerTypeACK TimerType = iota + TimerTypeACK TimerType = iota + 1 // TimerTypePTO is the timer type for the PTO retransmit timer TimerTypePTO + // TimerTypePathProbe is the timer type for the path probe retransmit timer + TimerTypePathProbe ) // TimeoutReason is the reason why a connection is closed diff --git a/vendor/github.com/quic-go/quic-go/mockgen.go b/vendor/github.com/quic-go/quic-go/mockgen.go index 81cc4a5e..1a8b28db 100644 --- a/vendor/github.com/quic-go/quic-go/mockgen.go +++ b/vendor/github.com/quic-go/quic-go/mockgen.go @@ -14,23 +14,17 @@ type Sender = sender //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_internal_test.go github.com/quic-go/quic-go StreamI" type StreamI = streamI -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_crypto_stream_test.go github.com/quic-go/quic-go CryptoStream" -type CryptoStream = cryptoStream - //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_receive_stream_internal_test.go github.com/quic-go/quic-go ReceiveStreamI" type ReceiveStreamI = receiveStreamI //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_send_stream_internal_test.go github.com/quic-go/quic-go SendStreamI" type SendStreamI = sendStreamI -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_getter_test.go github.com/quic-go/quic-go StreamGetter" -type StreamGetter = streamGetter - //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package 
github.com/quic-go/quic-go -destination mock_stream_sender_test.go github.com/quic-go/quic-go StreamSender" type StreamSender = streamSender -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_crypto_data_handler_test.go github.com/quic-go/quic-go CryptoDataHandler" -type CryptoDataHandler = cryptoDataHandler +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_control_frame_getter_test.go github.com/quic-go/quic-go StreamControlFrameGetter" +type StreamControlFrameGetter = streamControlFrameGetter //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_frame_source_test.go github.com/quic-go/quic-go FrameSource" type FrameSource = frameSource @@ -67,10 +61,4 @@ type PacketHandler = packetHandler //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_packet_handler_manager_test.go github.com/quic-go/quic-go PacketHandlerManager" type PacketHandlerManager = packetHandlerManager -// Need to use source mode for the batchConn, since reflect mode follows type aliases. -// See https://github.com/golang/mock/issues/244 for details. -// -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -source sys_conn_oob.go -destination mock_batch_conn_test.go -mock_names batchConn=MockBatchConn" - -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -self_package github.com/quic-go/quic-go -destination mock_token_store_test.go github.com/quic-go/quic-go TokenStore" //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -self_package github.com/quic-go/quic-go -destination mock_packetconn_test.go net PacketConn" diff --git a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go index 3f3a640a..096eba14 100644 --- a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go +++ b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go @@ -13,16 +13,17 @@ import ( type mtuDiscoverer interface { // Start starts the MTU discovery process. // It's unnecessary to call ShouldSendProbe before that. - Start() + Start(now time.Time) ShouldSendProbe(now time.Time) bool CurrentSize() protocol.ByteCount - GetPing() (ping ackhandler.Frame, datagramSize protocol.ByteCount) + GetPing(now time.Time) (ping ackhandler.Frame, datagramSize protocol.ByteCount) + Reset(now time.Time, start, max protocol.ByteCount) } const ( // At some point, we have to stop searching for a higher MTU. // We're happy to send a packet that's 10 bytes smaller than the actual MTU. - maxMTUDiff = 20 + maxMTUDiff protocol.ByteCount = 20 // send a probe packet every mtuProbeDelay RTTs mtuProbeDelay = 5 // Once maxLostMTUProbes MTU probe packets larger than a certain size are lost, @@ -88,18 +89,21 @@ const ( type mtuFinder struct { lastProbeTime time.Time - mtuIncreased func(protocol.ByteCount) rttStats *utils.RTTStats inFlight protocol.ByteCount // the size of the probe packet currently in flight. 
InvalidByteCount if none is in flight min protocol.ByteCount - limit protocol.ByteCount // on initialization, we treat the maximum size as the first "lost" packet lost [maxLostMTUProbes]protocol.ByteCount lastProbeWasLost bool + // The generation is used to ignore ACKs / losses for probe packets sent before a reset. + // Resets happen when the connection is migrated to a new path. + // We're therefore not concerned about overflows of this counter. + generation uint8 + tracer *logging.ConnectionTracer } @@ -108,17 +112,19 @@ var _ mtuDiscoverer = &mtuFinder{} func newMTUDiscoverer( rttStats *utils.RTTStats, start, max protocol.ByteCount, - mtuIncreased func(protocol.ByteCount), tracer *logging.ConnectionTracer, ) *mtuFinder { f := &mtuFinder{ - inFlight: protocol.InvalidByteCount, - min: start, - limit: max, - rttStats: rttStats, - mtuIncreased: mtuIncreased, - tracer: tracer, + inFlight: protocol.InvalidByteCount, + rttStats: rttStats, + tracer: tracer, } + f.init(start, max) + return f +} + +func (f *mtuFinder) init(start, max protocol.ByteCount) { + f.min = start for i := range f.lost { if i == 0 { f.lost[i] = max @@ -126,7 +132,6 @@ func newMTUDiscoverer( } f.lost[i] = protocol.InvalidByteCount } - return f } func (f *mtuFinder) done() bool { @@ -142,8 +147,8 @@ func (f *mtuFinder) max() protocol.ByteCount { return f.lost[len(f.lost)-1] } -func (f *mtuFinder) Start() { - f.lastProbeTime = time.Now() // makes sure the first probe packet is not sent immediately +func (f *mtuFinder) Start(now time.Time) { + f.lastProbeTime = now // makes sure the first probe packet is not sent immediately } func (f *mtuFinder) ShouldSendProbe(now time.Time) bool { @@ -156,18 +161,18 @@ func (f *mtuFinder) ShouldSendProbe(now time.Time) bool { return !now.Before(f.lastProbeTime.Add(mtuProbeDelay * f.rttStats.SmoothedRTT())) } -func (f *mtuFinder) GetPing() (ackhandler.Frame, protocol.ByteCount) { +func (f *mtuFinder) GetPing(now time.Time) (ackhandler.Frame, protocol.ByteCount) { var size protocol.ByteCount if f.lastProbeWasLost { size = (f.min + f.lost[0]) / 2 } else { size = (f.min + f.max()) / 2 } - f.lastProbeTime = time.Now() + f.lastProbeTime = now f.inFlight = size return ackhandler.Frame{ Frame: &wire.PingFrame{}, - Handler: &mtuFinderAckHandler{f}, + Handler: &mtuFinderAckHandler{mtuFinder: f, generation: f.generation}, }, size } @@ -175,13 +180,26 @@ func (f *mtuFinder) CurrentSize() protocol.ByteCount { return f.min } +func (f *mtuFinder) Reset(now time.Time, start, max protocol.ByteCount) { + f.generation++ + f.lastProbeTime = now + f.lastProbeWasLost = false + f.inFlight = protocol.InvalidByteCount + f.init(start, max) +} + type mtuFinderAckHandler struct { *mtuFinder + generation uint8 } var _ ackhandler.FrameHandler = &mtuFinderAckHandler{} func (h *mtuFinderAckHandler) OnAcked(wire.Frame) { + if h.generation != h.mtuFinder.generation { + // ACK for probe sent before reset + return + } size := h.inFlight if size == protocol.InvalidByteCount { panic("OnAcked callback called although there's no MTU probe packet in flight") @@ -209,10 +227,13 @@ func (h *mtuFinderAckHandler) OnAcked(wire.Frame) { if h.tracer != nil && h.tracer.UpdatedMTU != nil { h.tracer.UpdatedMTU(size, h.done()) } - h.mtuIncreased(size) } func (h *mtuFinderAckHandler) OnLost(wire.Frame) { + if h.generation != h.mtuFinder.generation { + // probe sent before reset received + return + } size := h.inFlight if size == protocol.InvalidByteCount { panic("OnLost callback called although there's no MTU probe packet in flight") diff 
--git a/vendor/github.com/quic-go/quic-go/multiplexer.go b/vendor/github.com/quic-go/quic-go/multiplexer.go deleted file mode 100644 index 85f7f403..00000000 --- a/vendor/github.com/quic-go/quic-go/multiplexer.go +++ /dev/null @@ -1,75 +0,0 @@ -package quic - -import ( - "fmt" - "net" - "sync" - - "github.com/quic-go/quic-go/internal/utils" -) - -var ( - connMuxerOnce sync.Once - connMuxer multiplexer -) - -type indexableConn interface{ LocalAddr() net.Addr } - -type multiplexer interface { - AddConn(conn indexableConn) - RemoveConn(indexableConn) error -} - -// The connMultiplexer listens on multiple net.PacketConns and dispatches -// incoming packets to the connection handler. -type connMultiplexer struct { - mutex sync.Mutex - - conns map[string] /* LocalAddr().String() */ indexableConn - logger utils.Logger -} - -var _ multiplexer = &connMultiplexer{} - -func getMultiplexer() multiplexer { - connMuxerOnce.Do(func() { - connMuxer = &connMultiplexer{ - conns: make(map[string]indexableConn), - logger: utils.DefaultLogger.WithPrefix("muxer"), - } - }) - return connMuxer -} - -func (m *connMultiplexer) index(addr net.Addr) string { - return addr.Network() + " " + addr.String() -} - -func (m *connMultiplexer) AddConn(c indexableConn) { - m.mutex.Lock() - defer m.mutex.Unlock() - - connIndex := m.index(c.LocalAddr()) - p, ok := m.conns[connIndex] - if ok { - // Panics if we're already listening on this connection. - // This is a safeguard because we're introducing a breaking API change, see - // https://github.com/quic-go/quic-go/issues/3727 for details. - // We'll remove this at a later time, when most users of the library have made the switch. - panic("connection already exists") // TODO: write a nice message - } - m.conns[connIndex] = p -} - -func (m *connMultiplexer) RemoveConn(c indexableConn) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - connIndex := m.index(c.LocalAddr()) - if _, ok := m.conns[connIndex]; !ok { - return fmt.Errorf("cannote remove connection, connection is unknown") - } - - delete(m.conns, connIndex) - return nil -} diff --git a/vendor/github.com/quic-go/quic-go/oss-fuzz.sh b/vendor/github.com/quic-go/quic-go/oss-fuzz.sh index 22a577fe..92a57a2c 100644 --- a/vendor/github.com/quic-go/quic-go/oss-fuzz.sh +++ b/vendor/github.com/quic-go/quic-go/oss-fuzz.sh @@ -3,12 +3,12 @@ # Install Go manually, since oss-fuzz ships with an outdated Go version. # See https://github.com/google/oss-fuzz/pull/10643. 
export CXX="${CXX} -lresolv" # required by Go 1.20 -wget https://go.dev/dl/go1.22.0.linux-amd64.tar.gz \ +wget https://go.dev/dl/go1.23.0.linux-amd64.tar.gz \ && mkdir temp-go \ && rm -rf /root/.go/* \ - && tar -C temp-go/ -xzf go1.22.0.linux-amd64.tar.gz \ + && tar -C temp-go/ -xzf go1.23.0.linux-amd64.tar.gz \ && mv temp-go/go/* /root/.go/ \ - && rm -rf temp-go go1.22.0.linux-amd64.tar.gz + && rm -rf temp-go go1.23.0.linux-amd64.tar.gz ( # fuzz qpack diff --git a/vendor/github.com/quic-go/quic-go/packet_handler_map.go b/vendor/github.com/quic-go/quic-go/packet_handler_map.go index 7840202c..84841984 100644 --- a/vendor/github.com/quic-go/quic-go/packet_handler_map.go +++ b/vendor/github.com/quic-go/quic-go/packet_handler_map.go @@ -1,10 +1,6 @@ package quic import ( - "crypto/hmac" - "crypto/rand" - "crypto/sha256" - "hash" "io" "net" "sync" @@ -56,15 +52,12 @@ type packetHandlerMap struct { deleteRetiredConnsAfter time.Duration - statelessResetMutex sync.Mutex - statelessResetHasher hash.Hash - logger utils.Logger } var _ packetHandlerManager = &packetHandlerMap{} -func newPacketHandlerMap(key *StatelessResetKey, enqueueClosePacket func(closePacket), logger utils.Logger) *packetHandlerMap { +func newPacketHandlerMap(enqueueClosePacket func(closePacket), logger utils.Logger) *packetHandlerMap { h := &packetHandlerMap{ closeChan: make(chan struct{}), handlers: make(map[protocol.ConnectionID]packetHandler), @@ -73,9 +66,6 @@ func newPacketHandlerMap(key *StatelessResetKey, enqueueClosePacket func(closePa enqueueClosePacket: enqueueClosePacket, logger: logger, } - if key != nil { - h.statelessResetHasher = hmac.New(sha256.New, key[:]) - } if h.logger.Debug() { go h.logUsage() } @@ -236,20 +226,3 @@ func (h *packetHandlerMap) Close(e error) { h.mutex.Unlock() wg.Wait() } - -func (h *packetHandlerMap) GetStatelessResetToken(connID protocol.ConnectionID) protocol.StatelessResetToken { - var token protocol.StatelessResetToken - if h.statelessResetHasher == nil { - // Return a random stateless reset token. - // This token will be sent in the server's transport parameters. - // By using a random token, an off-path attacker won't be able to disrupt the connection. 
- rand.Read(token[:]) - return token - } - h.statelessResetMutex.Lock() - h.statelessResetHasher.Write(connID.Bytes()) - copy(token[:], h.statelessResetHasher.Sum(nil)) - h.statelessResetHasher.Reset() - h.statelessResetMutex.Unlock() - return token -} diff --git a/vendor/github.com/quic-go/quic-go/packet_packer.go b/vendor/github.com/quic-go/quic-go/packet_packer.go index e707734f..e84a6c02 100644 --- a/vendor/github.com/quic-go/quic-go/packet_packer.go +++ b/vendor/github.com/quic-go/quic-go/packet_packer.go @@ -5,8 +5,8 @@ import ( "encoding/binary" "errors" "fmt" - - "golang.org/x/exp/rand" + "math/rand/v2" + "time" "github.com/quic-go/quic-go/internal/ackhandler" "github.com/quic-go/quic-go/internal/handshake" @@ -18,12 +18,13 @@ import ( var errNothingToPack = errors.New("nothing to pack") type packer interface { - PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) - PackAckOnlyPacket(maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) - AppendPacket(buf *packetBuffer, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) - MaybePackProbePacket(protocol.EncryptionLevel, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) + PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (*coalescedPacket, error) + PackAckOnlyPacket(maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) + AppendPacket(_ *packetBuffer, maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, error) + PackPTOProbePacket(_ protocol.EncryptionLevel, _ protocol.ByteCount, addPingIfEmpty bool, now time.Time, v protocol.Version) (*coalescedPacket, error) PackConnectionClose(*qerr.TransportError, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) PackApplicationClose(*qerr.ApplicationError, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) + PackPathProbePacket(protocol.ConnectionID, []ackhandler.Frame, protocol.Version) (shortHeaderPacket, *packetBuffer, error) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) SetToken([]byte) @@ -56,6 +57,7 @@ type shortHeaderPacket struct { Ack *wire.AckFrame Length protocol.ByteCount IsPathMTUProbePacket bool + IsPathProbePacket bool // used for logging DestConnID protocol.ConnectionID @@ -106,12 +108,11 @@ type sealingManager interface { type frameSource interface { HasData() bool - AppendStreamFrames([]ackhandler.StreamFrame, protocol.ByteCount, protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) - AppendControlFrames([]ackhandler.Frame, protocol.ByteCount, protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) + Append([]ackhandler.Frame, []ackhandler.StreamFrame, protocol.ByteCount, time.Time, protocol.Version) ([]ackhandler.Frame, []ackhandler.StreamFrame, protocol.ByteCount) } type ackFrameSource interface { - GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame + GetAckFrame(_ protocol.EncryptionLevel, now time.Time, onlyIfQueued bool) *wire.AckFrame } type packetPacker struct { @@ -121,8 +122,8 @@ type packetPacker struct { perspective protocol.Perspective cryptoSetup sealingManager - initialStream cryptoStream - handshakeStream cryptoStream + initialStream *cryptoStream + handshakeStream *cryptoStream token []byte @@ -141,7 +142,7 @@ var _ packer = 
&packetPacker{} func newPacketPacker( srcConnID protocol.ConnectionID, getDestConnID func() protocol.ConnectionID, - initialStream, handshakeStream cryptoStream, + initialStream, handshakeStream *cryptoStream, packetNumberManager packetNumberManager, retransmissionQueue *retransmissionQueue, cryptoSetup sealingManager, @@ -150,7 +151,7 @@ func newPacketPacker( datagramQueue *datagramQueue, perspective protocol.Perspective, ) *packetPacker { - var b [8]byte + var b [16]byte _, _ = crand.Read(b[:]) return &packetPacker{ @@ -164,7 +165,7 @@ func newPacketPacker( perspective: perspective, framer: framer, acks: acks, - rand: *rand.New(rand.NewSource(binary.BigEndian.Uint64(b[:]))), + rand: *rand.New(rand.NewPCG(binary.BigEndian.Uint64(b[:8]), binary.BigEndian.Uint64(b[8:]))), pnManager: packetNumberManager, } } @@ -269,17 +270,17 @@ func (p *packetPacker) packConnectionClose( if sealers[i] == nil { continue } - var paddingLen protocol.ByteCount - if encLevel == protocol.EncryptionInitial { - paddingLen = p.initialPaddingLen(payloads[i].frames, size, maxPacketSize) - } if encLevel == protocol.Encryption1RTT { - shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, keyPhase, payloads[i], paddingLen, maxPacketSize, sealers[i], false, v) + shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, keyPhase, payloads[i], 0, maxPacketSize, sealers[i], false, v) if err != nil { return nil, err } packet.shortHdrPacket = &shp } else { + var paddingLen protocol.ByteCount + if encLevel == protocol.EncryptionInitial { + paddingLen = p.initialPaddingLen(payloads[i].frames, size, maxPacketSize) + } longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], v) if err != nil { return nil, err @@ -328,7 +329,7 @@ func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, currentSize, // PackCoalescedPacket packs a new packet. // It packs an Initial / Handshake if there is data to send in these packet number spaces. // It should only be called before the handshake is confirmed. -func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) { +func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxSize protocol.ByteCount, now time.Time, v protocol.Version) (*coalescedPacket, error) { var ( initialHdr, handshakeHdr, zeroRTTHdr *wire.ExtendedHeader initialPayload, handshakePayload, zeroRTTPayload, oneRTTPayload payload @@ -342,7 +343,15 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. } var size protocol.ByteCount if initialSealer != nil { - initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true, v) + initialHdr, initialPayload = p.maybeGetCryptoPacket( + maxSize-protocol.ByteCount(initialSealer.Overhead()), + protocol.EncryptionInitial, + now, + false, + onlyAck, + true, + v, + ) if initialPayload.length > 0 { size += p.longHeaderPacketLength(initialHdr, initialPayload, v) + protocol.ByteCount(initialSealer.Overhead()) } @@ -350,14 +359,22 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. // Add a Handshake packet. 
var handshakeSealer sealer - if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + if (onlyAck && size == 0) || (!onlyAck && size < maxSize-protocol.MinCoalescedPacketSize) { var err error handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer() if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { return nil, err } if handshakeSealer != nil { - handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0, v) + handshakeHdr, handshakePayload = p.maybeGetCryptoPacket( + maxSize-size-protocol.ByteCount(handshakeSealer.Overhead()), + protocol.EncryptionHandshake, + now, + false, + onlyAck, + size == 0, + v, + ) if handshakePayload.length > 0 { s := p.longHeaderPacketLength(handshakeHdr, handshakePayload, v) + protocol.ByteCount(handshakeSealer.Overhead()) size += s @@ -370,7 +387,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. var oneRTTSealer handshake.ShortHeaderSealer var connID protocol.ConnectionID var kp protocol.KeyPhaseBit - if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + if (onlyAck && size == 0) || (!onlyAck && size < maxSize-protocol.MinCoalescedPacketSize) { var err error oneRTTSealer, err = p.cryptoSetup.Get1RTTSealer() if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { @@ -381,7 +398,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. connID = p.getDestConnID() oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) hdrLen := wire.ShortHeaderLen(connID, oneRTTPacketNumberLen) - oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxPacketSize-size, onlyAck, size == 0, v) + oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxSize-size, onlyAck, size == 0, now, v) if oneRTTPayload.length > 0 { size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, oneRTTPayload) + protocol.ByteCount(oneRTTSealer.Overhead()) } @@ -392,7 +409,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. return nil, err } if zeroRTTSealer != nil { - zeroRTTHdr, zeroRTTPayload = p.maybeGetAppDataPacketFor0RTT(zeroRTTSealer, maxPacketSize-size, v) + zeroRTTHdr, zeroRTTPayload = p.maybeGetAppDataPacketFor0RTT(zeroRTTSealer, maxSize-size, now, v) if zeroRTTPayload.length > 0 { size += p.longHeaderPacketLength(zeroRTTHdr, zeroRTTPayload, v) + protocol.ByteCount(zeroRTTSealer.Overhead()) } @@ -410,7 +427,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. longHdrPackets: make([]*longHeaderPacket, 0, 3), } if initialPayload.length > 0 { - padding := p.initialPaddingLen(initialPayload.frames, size, maxPacketSize) + padding := p.initialPaddingLen(initialPayload.frames, size, maxSize) cont, err := p.appendLongHeaderPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, v) if err != nil { return nil, err @@ -431,7 +448,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. 
} packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket) } else if oneRTTPayload.length > 0 { - shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, kp, oneRTTPayload, 0, maxPacketSize, oneRTTSealer, false, v) + shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, kp, oneRTTPayload, 0, maxSize, oneRTTSealer, false, v) if err != nil { return nil, err } @@ -442,19 +459,25 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. // PackAckOnlyPacket packs a packet containing only an ACK in the application data packet number space. // It should be called after the handshake is confirmed. -func (p *packetPacker) PackAckOnlyPacket(maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) { +func (p *packetPacker) PackAckOnlyPacket(maxSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) { buf := getPacketBuffer() - packet, err := p.appendPacket(buf, true, maxPacketSize, v) + packet, err := p.appendPacket(buf, true, maxSize, now, v) return packet, buf, err } // AppendPacket packs a packet in the application data packet number space. // It should be called after the handshake is confirmed. -func (p *packetPacker) AppendPacket(buf *packetBuffer, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) { - return p.appendPacket(buf, false, maxPacketSize, v) +func (p *packetPacker) AppendPacket(buf *packetBuffer, maxSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, error) { + return p.appendPacket(buf, false, maxSize, now, v) } -func (p *packetPacker) appendPacket(buf *packetBuffer, onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) { +func (p *packetPacker) appendPacket( + buf *packetBuffer, + onlyAck bool, + maxPacketSize protocol.ByteCount, + now time.Time, + v protocol.Version, +) (shortHeaderPacket, error) { sealer, err := p.cryptoSetup.Get1RTTSealer() if err != nil { return shortHeaderPacket{}, err @@ -462,7 +485,7 @@ func (p *packetPacker) appendPacket(buf *packetBuffer, onlyAck bool, maxPacketSi pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) connID := p.getDestConnID() hdrLen := wire.ShortHeaderLen(connID, pnLen) - pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, maxPacketSize, onlyAck, true, v) + pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, maxPacketSize, onlyAck, true, now, v) if pl.length == 0 { return shortHeaderPacket{}, errNothingToPack } @@ -471,9 +494,16 @@ func (p *packetPacker) appendPacket(buf *packetBuffer, onlyAck bool, maxPacketSi return p.appendShortHeaderPacket(buf, connID, pn, pnLen, kp, pl, 0, maxPacketSize, sealer, false, v) } -func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool, v protocol.Version) (*wire.ExtendedHeader, payload) { +func (p *packetPacker) maybeGetCryptoPacket( + maxPacketSize protocol.ByteCount, + encLevel protocol.EncryptionLevel, + now time.Time, + addPingIfEmpty bool, + onlyAck, ackAllowed bool, + v protocol.Version, +) (*wire.ExtendedHeader, payload) { if onlyAck { - if ack := p.acks.GetAckFrame(encLevel, true); ack != nil { + if ack := p.acks.GetAckFrame(encLevel, now, true); ack != nil { return p.getLongHeader(encLevel, v), payload{ ack: ack, length: ack.Length(v), @@ -482,32 +512,33 @@ func (p *packetPacker) 
maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, en return nil, payload{} } - var s cryptoStream - var handler ackhandler.FrameHandler - var hasRetransmission bool + var s *cryptoStream //nolint:exhaustive // Initial and Handshake are the only two encryption levels here. switch encLevel { case protocol.EncryptionInitial: s = p.initialStream - handler = p.retransmissionQueue.InitialAckHandler() - hasRetransmission = p.retransmissionQueue.HasInitialData() case protocol.EncryptionHandshake: s = p.handshakeStream - handler = p.retransmissionQueue.HandshakeAckHandler() - hasRetransmission = p.retransmissionQueue.HasHandshakeData() } - hasData := s.HasData() + handler := p.retransmissionQueue.AckHandler(encLevel) + hasRetransmission := p.retransmissionQueue.HasData(encLevel) + var ack *wire.AckFrame if ackAllowed { - ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData) + ack = p.acks.GetAckFrame(encLevel, now, !hasRetransmission && !hasData) } + var pl payload if !hasData && !hasRetransmission && ack == nil { - // nothing to send - return nil, payload{} + if !addPingIfEmpty { + // nothing to send + return nil, payload{} + } + ping := &wire.PingFrame{} + pl.frames = append(pl.frames, ackhandler.Frame{Frame: ping, Handler: emptyHandler{}}) + pl.length += ping.Length(v) } - var pl payload if ack != nil { pl.ack = ack pl.length = ack.Length(v) @@ -517,49 +548,54 @@ func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, en maxPacketSize -= hdr.GetLength(v) if hasRetransmission { for { - var f ackhandler.Frame - //nolint:exhaustive // 0-RTT packets can't contain any retransmission.s - switch encLevel { - case protocol.EncryptionInitial: - f.Frame = p.retransmissionQueue.GetInitialFrame(maxPacketSize, v) - f.Handler = p.retransmissionQueue.InitialAckHandler() - case protocol.EncryptionHandshake: - f.Frame = p.retransmissionQueue.GetHandshakeFrame(maxPacketSize, v) - f.Handler = p.retransmissionQueue.HandshakeAckHandler() - } - if f.Frame == nil { + frame := p.retransmissionQueue.GetFrame(encLevel, maxPacketSize, v) + if frame == nil { break } - pl.frames = append(pl.frames, f) - frameLen := f.Frame.Length(v) + pl.frames = append(pl.frames, ackhandler.Frame{ + Frame: frame, + Handler: p.retransmissionQueue.AckHandler(encLevel), + }) + frameLen := frame.Length(v) pl.length += frameLen maxPacketSize -= frameLen } } else if s.HasData() { cf := s.PopCryptoFrame(maxPacketSize) - pl.frames = []ackhandler.Frame{{Frame: cf, Handler: handler}} + pl.frames = append(pl.frames, ackhandler.Frame{Frame: cf, Handler: handler}) pl.length += cf.Length(v) } return hdr, pl } -func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount, v protocol.Version) (*wire.ExtendedHeader, payload) { +func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxSize protocol.ByteCount, now time.Time, v protocol.Version) (*wire.ExtendedHeader, payload) { if p.perspective != protocol.PerspectiveClient { return nil, payload{} } hdr := p.getLongHeader(protocol.Encryption0RTT, v) - maxPayloadSize := maxPacketSize - hdr.GetLength(v) - protocol.ByteCount(sealer.Overhead()) - return hdr, p.maybeGetAppDataPacket(maxPayloadSize, false, false, v) + maxPayloadSize := maxSize - hdr.GetLength(v) - protocol.ByteCount(sealer.Overhead()) + return hdr, p.maybeGetAppDataPacket(maxPayloadSize, false, false, now, v) } -func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, hdrLen protocol.ByteCount, maxPacketSize protocol.ByteCount, 
onlyAck, ackAllowed bool, v protocol.Version) payload { +func (p *packetPacker) maybeGetShortHeaderPacket( + sealer handshake.ShortHeaderSealer, + hdrLen, maxPacketSize protocol.ByteCount, + onlyAck, ackAllowed bool, + now time.Time, + v protocol.Version, +) payload { maxPayloadSize := maxPacketSize - hdrLen - protocol.ByteCount(sealer.Overhead()) - return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, v) + return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, now, v) } -func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.Version) payload { - pl := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed, v) +func (p *packetPacker) maybeGetAppDataPacket( + maxPayloadSize protocol.ByteCount, + onlyAck, ackAllowed bool, + now time.Time, + v protocol.Version, +) payload { + pl := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed, now, v) // check if we have anything to send if len(pl.frames) == 0 && len(pl.streamFrames) == 0 { @@ -581,21 +617,26 @@ func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, return pl } -func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.Version) payload { +func (p *packetPacker) composeNextPacket( + maxPayloadSize protocol.ByteCount, + onlyAck, ackAllowed bool, + now time.Time, + v protocol.Version, +) payload { if onlyAck { - if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, true); ack != nil { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, now, true); ack != nil { return payload{ack: ack, length: ack.Length(v)} } return payload{} } hasData := p.framer.HasData() - hasRetransmission := p.retransmissionQueue.HasAppData() + hasRetransmission := p.retransmissionQueue.HasData(protocol.Encryption1RTT) var hasAck bool var pl payload if ackAllowed { - if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData); ack != nil { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, now, !hasRetransmission && !hasData); ack != nil { pl.ack = ack pl.length += ack.Length(v) hasAck = true @@ -605,7 +646,7 @@ func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAc if p.datagramQueue != nil { if f := p.datagramQueue.Peek(); f != nil { size := f.Length(v) - if size <= maxFrameSize-pl.length { // DATAGRAM frame fits + if size <= maxPayloadSize-pl.length { // DATAGRAM frame fits pl.frames = append(pl.frames, ackhandler.Frame{Frame: f}) pl.length += size p.datagramQueue.Pop() @@ -625,15 +666,15 @@ func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAc if hasRetransmission { for { - remainingLen := maxFrameSize - pl.length + remainingLen := maxPayloadSize - pl.length if remainingLen < protocol.MinStreamFrameSize { break } - f := p.retransmissionQueue.GetAppDataFrame(remainingLen, v) + f := p.retransmissionQueue.GetFrame(protocol.Encryption1RTT, remainingLen, v) if f == nil { break } - pl.frames = append(pl.frames, ackhandler.Frame{Frame: f, Handler: p.retransmissionQueue.AppDataAckHandler()}) + pl.frames = append(pl.frames, ackhandler.Frame{Frame: f, Handler: p.retransmissionQueue.AckHandler(protocol.Encryption1RTT)}) pl.length += f.Length(v) } } @@ -641,51 +682,37 @@ func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAc if hasData { var lengthAdded protocol.ByteCount startLen := len(pl.frames) - pl.frames, lengthAdded = p.framer.AppendControlFrames(pl.frames, maxFrameSize-pl.length, v) + 
pl.frames, pl.streamFrames, lengthAdded = p.framer.Append(pl.frames, pl.streamFrames, maxPayloadSize-pl.length, now, v) pl.length += lengthAdded // add handlers for the control frames that were added for i := startLen; i < len(pl.frames); i++ { + if pl.frames[i].Handler != nil { + continue + } switch pl.frames[i].Frame.(type) { case *wire.PathChallengeFrame, *wire.PathResponseFrame: // Path probing is currently not supported, therefore we don't need to set the OnAcked callback yet. // PATH_CHALLENGE and PATH_RESPONSE are never retransmitted. default: - pl.frames[i].Handler = p.retransmissionQueue.AppDataAckHandler() + // we might be packing a 0-RTT packet, but we need to use the 1-RTT ack handler anyway + pl.frames[i].Handler = p.retransmissionQueue.AckHandler(protocol.Encryption1RTT) } } - - pl.streamFrames, lengthAdded = p.framer.AppendStreamFrames(pl.streamFrames, maxFrameSize-pl.length, v) - pl.length += lengthAdded } return pl } -func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) { +func (p *packetPacker) PackPTOProbePacket( + encLevel protocol.EncryptionLevel, + maxPacketSize protocol.ByteCount, + addPingIfEmpty bool, + now time.Time, + v protocol.Version, +) (*coalescedPacket, error) { if encLevel == protocol.Encryption1RTT { - s, err := p.cryptoSetup.Get1RTTSealer() - if err != nil { - return nil, err - } - kp := s.KeyPhase() - connID := p.getDestConnID() - pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) - hdrLen := wire.ShortHeaderLen(connID, pnLen) - pl := p.maybeGetAppDataPacket(maxPacketSize-protocol.ByteCount(s.Overhead())-hdrLen, false, true, v) - if pl.length == 0 { - return nil, nil - } - buffer := getPacketBuffer() - packet := &coalescedPacket{buffer: buffer} - shp, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, maxPacketSize, s, false, v) - if err != nil { - return nil, err - } - packet.shortHdrPacket = &shp - return packet, nil + return p.packPTOProbePacket1RTT(maxPacketSize, addPingIfEmpty, now, v) } - var hdr *wire.ExtendedHeader - var pl payload var sealer handshake.LongHeaderSealer //nolint:exhaustive // Probe packets are never sent for 0-RTT. 
switch encLevel { @@ -695,18 +722,24 @@ func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, m if err != nil { return nil, err } - hdr, pl = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true, v) case protocol.EncryptionHandshake: var err error sealer, err = p.cryptoSetup.GetHandshakeSealer() if err != nil { return nil, err } - hdr, pl = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true, v) default: panic("unknown encryption level") } - + hdr, pl := p.maybeGetCryptoPacket( + maxPacketSize-protocol.ByteCount(sealer.Overhead()), + encLevel, + now, + addPingIfEmpty, + false, + true, + v, + ) if pl.length == 0 { return nil, nil } @@ -726,6 +759,34 @@ func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, m return packet, nil } +func (p *packetPacker) packPTOProbePacket1RTT(maxPacketSize protocol.ByteCount, addPingIfEmpty bool, now time.Time, v protocol.Version) (*coalescedPacket, error) { + s, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return nil, err + } + kp := s.KeyPhase() + connID := p.getDestConnID() + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + hdrLen := wire.ShortHeaderLen(connID, pnLen) + pl := p.maybeGetAppDataPacket(maxPacketSize-protocol.ByteCount(s.Overhead())-hdrLen, false, true, now, v) + if pl.length == 0 { + if !addPingIfEmpty { + return nil, nil + } + ping := &wire.PingFrame{} + pl.frames = append(pl.frames, ackhandler.Frame{Frame: ping, Handler: emptyHandler{}}) + pl.length += ping.Length(v) + } + buffer := getPacketBuffer() + packet := &coalescedPacket{buffer: buffer} + shp, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, maxPacketSize, s, false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shp + return packet, nil +} + func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) { pl := payload{ frames: []ackhandler.Frame{ping}, @@ -744,6 +805,30 @@ func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.B return packet, buffer, err } +func (p *packetPacker) PackPathProbePacket(connID protocol.ConnectionID, frames []ackhandler.Frame, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) { + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + buf := getPacketBuffer() + s, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return shortHeaderPacket{}, nil, err + } + var l protocol.ByteCount + for _, f := range frames { + l += f.Frame.Length(v) + } + payload := payload{ + frames: frames, + length: l, + } + padding := protocol.MinInitialPacketSize - p.shortHeaderPacketLength(connID, pnLen, payload) - protocol.ByteCount(s.Overhead()) + packet, err := p.appendShortHeaderPacket(buf, connID, pn, pnLen, s.KeyPhase(), payload, padding, protocol.MinInitialPacketSize, s, false, v) + if err != nil { + return shortHeaderPacket{}, nil, err + } + packet.IsPathProbePacket = true + return packet, buf, err +} + func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel, v protocol.Version) *wire.ExtendedHeader { pn, pnLen := p.pnManager.PeekPacketNumber(encLevel) hdr := &wire.ExtendedHeader{ @@ -909,3 +994,10 @@ func (p *packetPacker) encryptPacket(raw []byte, sealer sealer, pn protocol.Pack func (p *packetPacker) SetToken(token []byte) { p.token = token } + +type 
emptyHandler struct{} + +var _ ackhandler.FrameHandler = emptyHandler{} + +func (emptyHandler) OnAcked(wire.Frame) {} +func (emptyHandler) OnLost(wire.Frame) {} diff --git a/vendor/github.com/quic-go/quic-go/packet_unpacker.go b/vendor/github.com/quic-go/quic-go/packet_unpacker.go index 1034aab1..2f607fbd 100644 --- a/vendor/github.com/quic-go/quic-go/packet_unpacker.go +++ b/vendor/github.com/quic-go/quic-go/packet_unpacker.go @@ -1,7 +1,6 @@ package quic import ( - "bytes" "fmt" "time" @@ -53,7 +52,7 @@ func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int) *packetU // If the reserved bits are invalid, the error is wire.ErrInvalidReservedBits. // If any other error occurred when parsing the header, the error is of type headerParseError. // If decrypting the payload fails for any reason, the error is the error returned by the AEAD. -func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.Version) (*unpackedPacket, error) { +func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error) { var encLevel protocol.EncryptionLevel var extHdr *wire.ExtendedHeader var decrypted []byte @@ -65,7 +64,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) if err != nil { return nil, err } @@ -75,7 +74,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) if err != nil { return nil, err } @@ -85,7 +84,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) if err != nil { return nil, err } @@ -125,8 +124,8 @@ func (u *packetUnpacker) UnpackShortHeader(rcvTime time.Time, data []byte) (prot return pn, pnLen, kp, decrypted, nil } -func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte, v protocol.Version) (*wire.ExtendedHeader, []byte, error) { - extHdr, parseErr := u.unpackLongHeader(opener, hdr, data, v) +func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, []byte, error) { + extHdr, parseErr := u.unpackLongHeader(opener, hdr, data) // If the reserved bits are set incorrectly, we still need to continue unpacking. // This avoids a timing side-channel, which otherwise might allow an attacker // to gain information about the header encryption. @@ -187,21 +186,18 @@ func (u *packetUnpacker) unpackShortHeader(hd headerDecryptor, data []byte) (int } // The error is either nil, a wire.ErrInvalidReservedBits or of type headerParseError. 
-func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.Version) (*wire.ExtendedHeader, error) { - extHdr, err := unpackLongHeader(hd, hdr, data, v) +func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { + extHdr, err := unpackLongHeader(hd, hdr, data) if err != nil && err != wire.ErrInvalidReservedBits { return nil, &headerParseError{err: err} } return extHdr, err } -func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.Version) (*wire.ExtendedHeader, error) { - r := bytes.NewReader(data) - +func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { hdrLen := hdr.ParsedLen() if protocol.ByteCount(len(data)) < hdrLen+4+16 { - //nolint:stylecheck - return nil, fmt.Errorf("Packet too small. Expected at least 20 bytes after the header, got %d", protocol.ByteCount(len(data))-hdrLen) + return nil, fmt.Errorf("packet too small, expected at least 20 bytes after the header, got %d", protocol.ByteCount(len(data))-hdrLen) } // The packet number can be up to 4 bytes long, but we won't know the length until we decrypt it. // 1. save a copy of the 4 bytes @@ -214,7 +210,7 @@ func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v proto data[hdrLen:hdrLen+4], ) // 3. parse the header (and learn the actual length of the packet number) - extHdr, parseErr := hdr.ParseExtended(r, v) + extHdr, parseErr := hdr.ParseExtended(data) if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { return nil, parseErr } diff --git a/vendor/github.com/quic-go/quic-go/path_manager.go b/vendor/github.com/quic-go/quic-go/path_manager.go new file mode 100644 index 00000000..34096654 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/path_manager.go @@ -0,0 +1,205 @@ +package quic + +import ( + "crypto/rand" + "net" + "slices" + "time" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" +) + +type pathID int64 + +const invalidPathID pathID = -1 + +// Maximum number of paths to keep track of. +// If the peer probes another path (before the pathTimeout of an existing path expires), +// this probing attempt is ignored. +const maxPaths = 3 + +// If no packet is received for a path for pathTimeout, +// the path can be evicted when the peer probes another path. +// This prevents an attacker from churning through paths by duplicating packets and +// sending them with spoofed source addresses. +const pathTimeout = 5 * time.Second + +type path struct { + id pathID + addr net.Addr + lastPacketTime time.Time + pathChallenge [8]byte + validated bool + rcvdNonProbing bool +} + +type pathManager struct { + nextPathID pathID + // ordered by lastPacketTime, with the most recently used path at the end + paths []*path + + getConnID func(pathID) (_ protocol.ConnectionID, ok bool) + retireConnID func(pathID) + + logger utils.Logger +} + +func newPathManager( + getConnID func(pathID) (_ protocol.ConnectionID, ok bool), + retireConnID func(pathID), + logger utils.Logger, +) *pathManager { + return &pathManager{ + paths: make([]*path, 0, maxPaths+1), + getConnID: getConnID, + retireConnID: retireConnID, + logger: logger, + } +} + +// Returns a path challenge frame if one should be sent. +// May return nil. 
+func (pm *pathManager) HandlePacket( + remoteAddr net.Addr, + t time.Time, + pathChallenge *wire.PathChallengeFrame, // may be nil if the packet didn't contain a PATH_CHALLENGE + isNonProbing bool, +) (_ protocol.ConnectionID, _ []ackhandler.Frame, shouldSwitch bool) { + var p *path + for i, path := range pm.paths { + if addrsEqual(path.addr, remoteAddr) { + p = path + p.lastPacketTime = t + // already sent a PATH_CHALLENGE for this path + if isNonProbing { + path.rcvdNonProbing = true + } + if pm.logger.Debug() { + pm.logger.Debugf("received packet for path %s that was already probed, validated: %t", remoteAddr, path.validated) + } + shouldSwitch = path.validated && path.rcvdNonProbing + if i != len(pm.paths)-1 { + // move the path to the end of the list + pm.paths = slices.Delete(pm.paths, i, i+1) + pm.paths = append(pm.paths, p) + } + if pathChallenge == nil { + return protocol.ConnectionID{}, nil, shouldSwitch + } + } + } + + if len(pm.paths) >= maxPaths { + if pm.paths[0].lastPacketTime.Add(pathTimeout).After(t) { + if pm.logger.Debug() { + pm.logger.Debugf("received packet for previously unseen path %s, but already have %d paths", remoteAddr, len(pm.paths)) + } + return protocol.ConnectionID{}, nil, shouldSwitch + } + // evict the oldest path, if the last packet was received more than pathTimeout ago + pm.retireConnID(pm.paths[0].id) + pm.paths = pm.paths[1:] + } + + var pathID pathID + if p != nil { + pathID = p.id + } else { + pathID = pm.nextPathID + } + + // previously unseen path, initiate path validation by sending a PATH_CHALLENGE + connID, ok := pm.getConnID(pathID) + if !ok { + pm.logger.Debugf("skipping validation of new path %s since no connection ID is available", remoteAddr) + return protocol.ConnectionID{}, nil, shouldSwitch + } + + frames := make([]ackhandler.Frame, 0, 2) + if p == nil { + var pathChallengeData [8]byte + rand.Read(pathChallengeData[:]) + p = &path{ + id: pm.nextPathID, + addr: remoteAddr, + lastPacketTime: t, + rcvdNonProbing: isNonProbing, + pathChallenge: pathChallengeData, + } + pm.nextPathID++ + pm.paths = append(pm.paths, p) + frames = append(frames, ackhandler.Frame{ + Frame: &wire.PathChallengeFrame{Data: p.pathChallenge}, + Handler: (*pathManagerAckHandler)(pm), + }) + pm.logger.Debugf("enqueueing PATH_CHALLENGE for new path %s", remoteAddr) + } + if pathChallenge != nil { + frames = append(frames, ackhandler.Frame{ + Frame: &wire.PathResponseFrame{Data: pathChallenge.Data}, + Handler: (*pathManagerAckHandler)(pm), + }) + } + return connID, frames, shouldSwitch +} + +func (pm *pathManager) HandlePathResponseFrame(f *wire.PathResponseFrame) { + for _, p := range pm.paths { + if f.Data == p.pathChallenge { + // path validated + p.validated = true + pm.logger.Debugf("path %s validated", p.addr) + break + } + } +} + +// SwitchToPath is called when the connection switches to a new path +func (pm *pathManager) SwitchToPath(addr net.Addr) { + // retire all other paths + for _, path := range pm.paths { + if addrsEqual(path.addr, addr) { + pm.logger.Debugf("switching to path %d (%s)", path.id, addr) + continue + } + pm.retireConnID(path.id) + } + clear(pm.paths) + pm.paths = pm.paths[:0] +} + +type pathManagerAckHandler pathManager + +var _ ackhandler.FrameHandler = &pathManagerAckHandler{} + +// Acknowledging the frame doesn't validate the path, only receiving the PATH_RESPONSE does. 
+func (pm *pathManagerAckHandler) OnAcked(f wire.Frame) {} + +func (pm *pathManagerAckHandler) OnLost(f wire.Frame) { + pc, ok := f.(*wire.PathChallengeFrame) + if !ok { + return + } + for i, path := range pm.paths { + if path.pathChallenge == pc.Data { + pm.paths = slices.Delete(pm.paths, i, i+1) + pm.retireConnID(path.id) + break + } + } +} + +func addrsEqual(addr1, addr2 net.Addr) bool { + if addr1 == nil || addr2 == nil { + return false + } + a1, ok1 := addr1.(*net.UDPAddr) + a2, ok2 := addr2.(*net.UDPAddr) + if ok1 && ok2 { + return a1.IP.Equal(a2.IP) && a1.Port == a2.Port + } + return addr1.String() == addr2.String() +} diff --git a/vendor/github.com/quic-go/quic-go/path_manager_outgoing.go b/vendor/github.com/quic-go/quic-go/path_manager_outgoing.go new file mode 100644 index 00000000..595eda23 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/path_manager_outgoing.go @@ -0,0 +1,311 @@ +package quic + +import ( + "context" + "crypto/rand" + "errors" + "slices" + "sync" + "sync/atomic" + "time" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" +) + +var ( + // ErrPathClosed is returned when trying to switch to a path that has been closed. + ErrPathClosed = errors.New("path closed") + // ErrPathNotValidated is returned when trying to use a path before path probing has completed. + ErrPathNotValidated = errors.New("path not yet validated") +) + +var errPathDoesNotExist = errors.New("path does not exist") + +// Path is a network path. +type Path struct { + id pathID + pathManager *pathManagerOutgoing + tr *Transport + initialRTT time.Duration + + enablePath func() + validated atomic.Bool + abandon chan struct{} +} + +func (p *Path) Probe(ctx context.Context) error { + path := p.pathManager.addPath(p, p.enablePath) + + p.pathManager.enqueueProbe(p) + nextProbeDur := p.initialRTT + var timer *time.Timer + var timerChan <-chan time.Time + for { + select { + case <-ctx.Done(): + return context.Cause(ctx) + case <-path.Validated(): + p.validated.Store(true) + return nil + case <-timerChan: + p.pathManager.enqueueProbe(p) + case <-path.ProbeSent(): + case <-p.abandon: + return ErrPathClosed + } + + if timer != nil { + timer.Stop() + } + timer = time.NewTimer(nextProbeDur) + timerChan = timer.C + nextProbeDur *= 2 // exponential backoff + } +} + +// Switch switches the QUIC connection to this path. +// It immediately stops sending on the old path, and sends on this new path. +func (p *Path) Switch() error { + if err := p.pathManager.switchToPath(p.id); err != nil { + switch { + case errors.Is(err, ErrPathNotValidated): + return err + case errors.Is(err, errPathDoesNotExist) && !p.validated.Load(): + select { + case <-p.abandon: + return ErrPathClosed + default: + return ErrPathNotValidated + } + default: + return ErrPathClosed + } + } + return nil +} + +// Close abandons a path. +// It is not possible to close the path that’s currently active. +// After closing, it is not possible to probe this path again. 
+func (p *Path) Close() error { + select { + case <-p.abandon: + return nil + default: + } + + if err := p.pathManager.removePath(p.id); err != nil { + return err + } + close(p.abandon) + return nil +} + +type pathOutgoing struct { + pathChallenges [][8]byte // length is implicitly limited by exponential backoff + tr *Transport + isValidated bool + probeSent chan struct{} // receives when a PATH_CHALLENGE is sent + validated chan struct{} // closed when the path the corresponding PATH_RESPONSE is received + enablePath func() +} + +func (p *pathOutgoing) ProbeSent() <-chan struct{} { return p.probeSent } +func (p *pathOutgoing) Validated() <-chan struct{} { return p.validated } + +type pathManagerOutgoing struct { + getConnID func(pathID) (_ protocol.ConnectionID, ok bool) + retireConnID func(pathID) + scheduleSending func() + + mx sync.Mutex + activePath pathID + pathsToProbe []pathID + paths map[pathID]*pathOutgoing + nextPathID pathID + pathToSwitchTo *pathOutgoing +} + +func newPathManagerOutgoing( + getConnID func(pathID) (_ protocol.ConnectionID, ok bool), + retireConnID func(pathID), + scheduleSending func(), +) *pathManagerOutgoing { + return &pathManagerOutgoing{ + activePath: 0, // at initialization time, we're guaranteed to be using the handshake path + nextPathID: 1, + getConnID: getConnID, + retireConnID: retireConnID, + scheduleSending: scheduleSending, + paths: make(map[pathID]*pathOutgoing, 4), + } +} + +func (pm *pathManagerOutgoing) addPath(p *Path, enablePath func()) *pathOutgoing { + pm.mx.Lock() + defer pm.mx.Unlock() + + // path might already exist, and just being re-probed + if existingPath, ok := pm.paths[p.id]; ok { + existingPath.validated = make(chan struct{}) + return existingPath + } + + path := &pathOutgoing{ + tr: p.tr, + probeSent: make(chan struct{}, 1), + validated: make(chan struct{}), + enablePath: enablePath, + } + pm.paths[p.id] = path + return path +} + +func (pm *pathManagerOutgoing) enqueueProbe(p *Path) { + pm.mx.Lock() + pm.pathsToProbe = append(pm.pathsToProbe, p.id) + pm.mx.Unlock() + pm.scheduleSending() +} + +func (pm *pathManagerOutgoing) removePath(id pathID) error { + if err := pm.removePathImpl(id); err != nil { + return err + } + pm.scheduleSending() + return nil +} + +func (pm *pathManagerOutgoing) removePathImpl(id pathID) error { + pm.mx.Lock() + defer pm.mx.Unlock() + + if id == pm.activePath { + return errors.New("cannot close active path") + } + p, ok := pm.paths[id] + if !ok { + return nil + } + if len(p.pathChallenges) > 0 { + pm.retireConnID(id) + } + delete(pm.paths, id) + return nil +} + +func (pm *pathManagerOutgoing) switchToPath(id pathID) error { + pm.mx.Lock() + defer pm.mx.Unlock() + + p, ok := pm.paths[id] + if !ok { + return errPathDoesNotExist + } + if !p.isValidated { + return ErrPathNotValidated + } + pm.pathToSwitchTo = p + pm.activePath = id + return nil +} + +func (pm *pathManagerOutgoing) NewPath(t *Transport, initialRTT time.Duration, enablePath func()) *Path { + pm.mx.Lock() + defer pm.mx.Unlock() + + id := pm.nextPathID + pm.nextPathID++ + return &Path{ + pathManager: pm, + id: id, + tr: t, + enablePath: enablePath, + initialRTT: initialRTT, + abandon: make(chan struct{}), + } +} + +func (pm *pathManagerOutgoing) NextPathToProbe() (_ protocol.ConnectionID, _ ackhandler.Frame, _ *Transport, hasPath bool) { + pm.mx.Lock() + defer pm.mx.Unlock() + + var p *pathOutgoing + id := invalidPathID + for _, pID := range pm.pathsToProbe { + var ok bool + p, ok = pm.paths[pID] + if ok { + id = pID + break + } + // if the 
path doesn't exist in the map, it might have been abandoned + pm.pathsToProbe = pm.pathsToProbe[1:] + } + if id == invalidPathID { + return protocol.ConnectionID{}, ackhandler.Frame{}, nil, false + } + + connID, ok := pm.getConnID(id) + if !ok { + return protocol.ConnectionID{}, ackhandler.Frame{}, nil, false + } + + var b [8]byte + _, _ = rand.Read(b[:]) + p.pathChallenges = append(p.pathChallenges, b) + + pm.pathsToProbe = pm.pathsToProbe[1:] + p.enablePath() + select { + case p.probeSent <- struct{}{}: + default: + } + frame := ackhandler.Frame{ + Frame: &wire.PathChallengeFrame{Data: b}, + Handler: (*pathManagerOutgoingAckHandler)(pm), + } + return connID, frame, p.tr, true +} + +func (pm *pathManagerOutgoing) HandlePathResponseFrame(f *wire.PathResponseFrame) { + pm.mx.Lock() + defer pm.mx.Unlock() + + for _, p := range pm.paths { + if slices.Contains(p.pathChallenges, f.Data) { + // path validated + if !p.isValidated { + // make sure that duplicate PATH_RESPONSE frames are ignored + p.isValidated = true + p.pathChallenges = nil + close(p.validated) + } + break + } + } +} + +func (pm *pathManagerOutgoing) ShouldSwitchPath() (*Transport, bool) { + pm.mx.Lock() + defer pm.mx.Unlock() + + if pm.pathToSwitchTo == nil { + return nil, false + } + p := pm.pathToSwitchTo + pm.pathToSwitchTo = nil + return p.tr, true +} + +type pathManagerOutgoingAckHandler pathManagerOutgoing + +var _ ackhandler.FrameHandler = &pathManagerOutgoingAckHandler{} + +// OnAcked is called when the PATH_CHALLENGE is acked. +// This doesn't validate the path, only receiving the PATH_RESPONSE does. +func (pm *pathManagerOutgoingAckHandler) OnAcked(wire.Frame) {} + +func (pm *pathManagerOutgoingAckHandler) OnLost(wire.Frame) {} diff --git a/vendor/github.com/quic-go/quic-go/quicvarint/io.go b/vendor/github.com/quic-go/quic-go/quicvarint/io.go index 9368d1c5..27def409 100644 --- a/vendor/github.com/quic-go/quic-go/quicvarint/io.go +++ b/vendor/github.com/quic-go/quic-go/quicvarint/io.go @@ -31,7 +31,7 @@ func NewReader(r io.Reader) Reader { func (r *byteReader) ReadByte() (byte, error) { var b [1]byte - n, err := r.Reader.Read(b[:]) + n, err := r.Read(b[:]) if n == 1 && err == io.EOF { err = nil } @@ -63,6 +63,6 @@ func NewWriter(w io.Writer) Writer { } func (w *byteWriter) WriteByte(c byte) error { - _, err := w.Writer.Write([]byte{c}) + _, err := w.Write([]byte{c}) return err } diff --git a/vendor/github.com/quic-go/quic-go/quicvarint/varint.go b/vendor/github.com/quic-go/quic-go/quicvarint/varint.go index 9a22e334..f095e298 100644 --- a/vendor/github.com/quic-go/quic-go/quicvarint/varint.go +++ b/vendor/github.com/quic-go/quic-go/quicvarint/varint.go @@ -125,17 +125,18 @@ func AppendWithLen(b []byte, i uint64, length int) []byte { if l > length { panic(fmt.Sprintf("cannot encode %d in %d bytes", i, length)) } - if length == 2 { + switch length { + case 2: b = append(b, 0b01000000) - } else if length == 4 { + case 4: b = append(b, 0b10000000) - } else if length == 8 { + case 8: b = append(b, 0b11000000) } - for j := 1; j < length-l; j++ { + for range length - l - 1 { b = append(b, 0) } - for j := 0; j < l; j++ { + for j := range l { b = append(b, uint8(i>>(8*(l-1-j)))) } return b diff --git a/vendor/github.com/quic-go/quic-go/receive_stream.go b/vendor/github.com/quic-go/quic-go/receive_stream.go index 19759ad9..192b92f7 100644 --- a/vendor/github.com/quic-go/quic-go/receive_stream.go +++ b/vendor/github.com/quic-go/quic-go/receive_stream.go @@ -6,6 +6,7 @@ import ( "sync" "time" + 
"github.com/quic-go/quic-go/internal/ackhandler" "github.com/quic-go/quic-go/internal/flowcontrol" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" @@ -16,10 +17,9 @@ import ( type receiveStreamI interface { ReceiveStream - handleStreamFrame(*wire.StreamFrame) error - handleResetStreamFrame(*wire.ResetStreamFrame) error + handleStreamFrame(*wire.StreamFrame, time.Time) error + handleResetStreamFrame(*wire.ResetStreamFrame, time.Time) error closeForShutdown(error) - getWindowUpdate() protocol.ByteCount } type receiveStream struct { @@ -37,6 +37,9 @@ type receiveStream struct { readPosInFrame int currentFrameIsLast bool // is the currentFrame the last frame on this stream + queuedStopSending bool + queuedMaxStreamData bool + // Set once we read the io.EOF or the cancellation error. // Note that for local cancellations, this doesn't necessarily mean that we know the final offset yet. errorRead bool @@ -54,8 +57,9 @@ type receiveStream struct { } var ( - _ ReceiveStream = &receiveStream{} - _ receiveStreamI = &receiveStream{} + _ ReceiveStream = &receiveStream{} + _ receiveStreamI = &receiveStream{} + _ streamControlFrameGetter = &receiveStream{} ) func newReceiveStream( @@ -87,13 +91,19 @@ func (s *receiveStream) Read(p []byte) (int, error) { defer func() { <-s.readOnce }() s.mutex.Lock() - n, err := s.readImpl(p) + queuedStreamWindowUpdate, queuedConnWindowUpdate, n, err := s.readImpl(p) completed := s.isNewlyCompleted() s.mutex.Unlock() if completed { s.sender.onStreamCompleted(s.streamID) } + if queuedStreamWindowUpdate { + s.sender.onHasStreamControlFrame(s.streamID, s) + } + if queuedConnWindowUpdate { + s.sender.onHasConnectionData() + } return n, err } @@ -118,17 +128,17 @@ func (s *receiveStream) isNewlyCompleted() bool { return false } -func (s *receiveStream) readImpl(p []byte) (int, error) { +func (s *receiveStream) readImpl(p []byte) (hasStreamWindowUpdate bool, hasConnWindowUpdate bool, _ int, _ error) { if s.currentFrameIsLast && s.currentFrame == nil { s.errorRead = true - return 0, io.EOF + return false, false, 0, io.EOF } if s.cancelledRemotely || s.cancelledLocally { s.errorRead = true - return 0, s.cancelErr + return false, false, 0, s.cancelErr } if s.closeForShutdownErr != nil { - return 0, s.closeForShutdownErr + return false, false, 0, s.closeForShutdownErr } var bytesRead int @@ -138,23 +148,23 @@ func (s *receiveStream) readImpl(p []byte) (int, error) { s.dequeueNextFrame() } if s.currentFrame == nil && bytesRead > 0 { - return bytesRead, s.closeForShutdownErr + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, s.closeForShutdownErr } for { // Stop waiting on errors if s.closeForShutdownErr != nil { - return bytesRead, s.closeForShutdownErr + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, s.closeForShutdownErr } if s.cancelledRemotely || s.cancelledLocally { s.errorRead = true - return 0, s.cancelErr + return hasStreamWindowUpdate, hasConnWindowUpdate, 0, s.cancelErr } deadline := s.deadline if !deadline.IsZero() { if !time.Now().Before(deadline) { - return bytesRead, errDeadline + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, errDeadline } if deadlineTimer == nil { deadlineTimer = utils.NewTimer() @@ -184,10 +194,10 @@ func (s *receiveStream) readImpl(p []byte) (int, error) { } if bytesRead > len(p) { - return bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p)) + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, fmt.Errorf("BUG: 
bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p)) } if s.readPosInFrame > len(s.currentFrame) { - return bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame)) + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame)) } m := copy(p[bytesRead:], s.currentFrame[s.readPosInFrame:]) @@ -197,7 +207,14 @@ func (s *receiveStream) readImpl(p []byte) (int, error) { // when a RESET_STREAM was received, the flow controller was already // informed about the final byteOffset for this stream if !s.cancelledRemotely { - s.flowController.AddBytesRead(protocol.ByteCount(m)) + hasStream, hasConn := s.flowController.AddBytesRead(protocol.ByteCount(m)) + if hasStream { + s.queuedMaxStreamData = true + hasStreamWindowUpdate = true + } + if hasConn { + hasConnWindowUpdate = true + } } if s.readPosInFrame >= len(s.currentFrame) && s.currentFrameIsLast { @@ -206,10 +223,10 @@ func (s *receiveStream) readImpl(p []byte) (int, error) { s.currentFrameDone() } s.errorRead = true - return bytesRead, io.EOF + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, io.EOF } } - return bytesRead, nil + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, nil } func (s *receiveStream) dequeueNextFrame() { @@ -225,35 +242,39 @@ func (s *receiveStream) dequeueNextFrame() { func (s *receiveStream) CancelRead(errorCode StreamErrorCode) { s.mutex.Lock() - s.cancelReadImpl(errorCode) + queuedNewControlFrame := s.cancelReadImpl(errorCode) completed := s.isNewlyCompleted() s.mutex.Unlock() + if queuedNewControlFrame { + s.sender.onHasStreamControlFrame(s.streamID, s) + } if completed { s.flowController.Abandon() s.sender.onStreamCompleted(s.streamID) } } -func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) { +func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) (queuedNewControlFrame bool) { if s.cancelledLocally { // duplicate call to CancelRead - return + return false + } + if s.closeForShutdownErr != nil { + return false } s.cancelledLocally = true if s.errorRead || s.cancelledRemotely { - return + return false } + s.queuedStopSending = true s.cancelErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: false} s.signalRead() - s.sender.queueControlFrame(&wire.StopSendingFrame{ - StreamID: s.streamID, - ErrorCode: errorCode, - }) + return true } -func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error { +func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame, now time.Time) error { s.mutex.Lock() - err := s.handleStreamFrameImpl(frame) + err := s.handleStreamFrameImpl(frame, now) completed := s.isNewlyCompleted() s.mutex.Unlock() @@ -264,9 +285,9 @@ func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error { return err } -func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) error { +func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame, now time.Time) error { maxOffset := frame.Offset + frame.DataLen() - if err := s.flowController.UpdateHighestReceived(maxOffset, frame.Fin); err != nil { + if err := s.flowController.UpdateHighestReceived(maxOffset, frame.Fin, now); err != nil { return err } if frame.Fin { @@ -282,9 +303,9 @@ func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) error { return nil } -func (s *receiveStream) handleResetStreamFrame(frame 
*wire.ResetStreamFrame) error { +func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame, now time.Time) error { s.mutex.Lock() - err := s.handleResetStreamFrameImpl(frame) + err := s.handleResetStreamFrameImpl(frame, now) completed := s.isNewlyCompleted() s.mutex.Unlock() @@ -294,11 +315,11 @@ func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) err return err } -func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) error { +func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame, now time.Time) error { if s.closeForShutdownErr != nil { return nil } - if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true); err != nil { + if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true, now); err != nil { return err } s.finalOffset = frame.FinalSize @@ -318,6 +339,29 @@ func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) return nil } +func (s *receiveStream) getControlFrame(now time.Time) (_ ackhandler.Frame, ok, hasMore bool) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if !s.queuedStopSending && !s.queuedMaxStreamData { + return ackhandler.Frame{}, false, false + } + if s.queuedStopSending { + s.queuedStopSending = false + return ackhandler.Frame{ + Frame: &wire.StopSendingFrame{StreamID: s.streamID, ErrorCode: s.cancelErr.ErrorCode}, + }, true, s.queuedMaxStreamData + } + + s.queuedMaxStreamData = false + return ackhandler.Frame{ + Frame: &wire.MaxStreamDataFrame{ + StreamID: s.streamID, + MaximumStreamData: s.flowController.GetWindowUpdate(now), + }, + }, true, false +} + func (s *receiveStream) SetReadDeadline(t time.Time) error { s.mutex.Lock() s.deadline = t @@ -336,10 +380,6 @@ func (s *receiveStream) closeForShutdown(err error) { s.signalRead() } -func (s *receiveStream) getWindowUpdate() protocol.ByteCount { - return s.flowController.GetWindowUpdate() -} - // signalRead performs a non-blocking send on the readChan func (s *receiveStream) signalRead() { select { diff --git a/vendor/github.com/quic-go/quic-go/retransmission_queue.go b/vendor/github.com/quic-go/quic-go/retransmission_queue.go index 14ae1e3b..323af5b0 100644 --- a/vendor/github.com/quic-go/quic-go/retransmission_queue.go +++ b/vendor/github.com/quic-go/quic-go/retransmission_queue.go @@ -9,126 +9,106 @@ import ( "github.com/quic-go/quic-go/internal/wire" ) +type framesToRetransmit struct { + crypto []*wire.CryptoFrame + other []wire.Frame +} + type retransmissionQueue struct { - initial []wire.Frame - initialCryptoData []*wire.CryptoFrame - - handshake []wire.Frame - handshakeCryptoData []*wire.CryptoFrame - - appData []wire.Frame + initial *framesToRetransmit + handshake *framesToRetransmit + appData framesToRetransmit } func newRetransmissionQueue() *retransmissionQueue { - return &retransmissionQueue{} -} - -// AddPing queues a ping. -// It is used when a probe packet needs to be sent -func (q *retransmissionQueue) AddPing(encLevel protocol.EncryptionLevel) { - //nolint:exhaustive // Cannot send probe packets for 0-RTT. 
- switch encLevel { - case protocol.EncryptionInitial: - q.addInitial(&wire.PingFrame{}) - case protocol.EncryptionHandshake: - q.addHandshake(&wire.PingFrame{}) - case protocol.Encryption1RTT: - q.addAppData(&wire.PingFrame{}) - default: - panic("unexpected encryption level") + return &retransmissionQueue{ + initial: &framesToRetransmit{}, + handshake: &framesToRetransmit{}, } } func (q *retransmissionQueue) addInitial(f wire.Frame) { - if cf, ok := f.(*wire.CryptoFrame); ok { - q.initialCryptoData = append(q.initialCryptoData, cf) + if q.initial == nil { return } - q.initial = append(q.initial, f) + if cf, ok := f.(*wire.CryptoFrame); ok { + q.initial.crypto = append(q.initial.crypto, cf) + return + } + q.initial.other = append(q.initial.other, f) } func (q *retransmissionQueue) addHandshake(f wire.Frame) { - if cf, ok := f.(*wire.CryptoFrame); ok { - q.handshakeCryptoData = append(q.handshakeCryptoData, cf) + if q.handshake == nil { return } - q.handshake = append(q.handshake, f) -} - -func (q *retransmissionQueue) HasInitialData() bool { - return len(q.initialCryptoData) > 0 || len(q.initial) > 0 -} - -func (q *retransmissionQueue) HasHandshakeData() bool { - return len(q.handshakeCryptoData) > 0 || len(q.handshake) > 0 -} - -func (q *retransmissionQueue) HasAppData() bool { - return len(q.appData) > 0 + if cf, ok := f.(*wire.CryptoFrame); ok { + q.handshake.crypto = append(q.handshake.crypto, cf) + return + } + q.handshake.other = append(q.handshake.other, f) } func (q *retransmissionQueue) addAppData(f wire.Frame) { - if _, ok := f.(*wire.StreamFrame); ok { + switch f := f.(type) { + case *wire.StreamFrame: panic("STREAM frames are handled with their respective streams.") + case *wire.CryptoFrame: + q.appData.crypto = append(q.appData.crypto, f) + default: + q.appData.other = append(q.appData.other, f) } - q.appData = append(q.appData, f) } -func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount, v protocol.Version) wire.Frame { - if len(q.initialCryptoData) > 0 { - f := q.initialCryptoData[0] +func (q *retransmissionQueue) HasData(encLevel protocol.EncryptionLevel) bool { + //nolint:exhaustive // 0-RTT data is retransmitted in 1-RTT packets. + switch encLevel { + case protocol.EncryptionInitial: + return q.initial != nil && + (len(q.initial.crypto) > 0 || len(q.initial.other) > 0) + case protocol.EncryptionHandshake: + return q.handshake != nil && + (len(q.handshake.crypto) > 0 || len(q.handshake.other) > 0) + case protocol.Encryption1RTT: + return len(q.appData.crypto) > 0 || len(q.appData.other) > 0 + } + return false +} + +func (q *retransmissionQueue) GetFrame(encLevel protocol.EncryptionLevel, maxLen protocol.ByteCount, v protocol.Version) wire.Frame { + var r *framesToRetransmit + //nolint:exhaustive // 0-RTT data is retransmitted in 1-RTT packets. + switch encLevel { + case protocol.EncryptionInitial: + r = q.initial + case protocol.EncryptionHandshake: + r = q.handshake + case protocol.Encryption1RTT: + r = &q.appData + } + if r == nil { + return nil + } + + if len(r.crypto) > 0 { + f := r.crypto[0] newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v) if newFrame == nil && !needsSplit { // the whole frame fits - q.initialCryptoData = q.initialCryptoData[1:] + r.crypto = r.crypto[1:] return f } if newFrame != nil { // frame was split. Leave the original frame in the queue. 
return newFrame } } - if len(q.initial) == 0 { + if len(r.other) == 0 { return nil } - f := q.initial[0] + f := r.other[0] if f.Length(v) > maxLen { return nil } - q.initial = q.initial[1:] - return f -} - -func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount, v protocol.Version) wire.Frame { - if len(q.handshakeCryptoData) > 0 { - f := q.handshakeCryptoData[0] - newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v) - if newFrame == nil && !needsSplit { // the whole frame fits - q.handshakeCryptoData = q.handshakeCryptoData[1:] - return f - } - if newFrame != nil { // frame was split. Leave the original frame in the queue. - return newFrame - } - } - if len(q.handshake) == 0 { - return nil - } - f := q.handshake[0] - if f.Length(v) > maxLen { - return nil - } - q.handshake = q.handshake[1:] - return f -} - -func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount, v protocol.Version) wire.Frame { - if len(q.appData) == 0 { - return nil - } - f := q.appData[0] - if f.Length(v) > maxLen { - return nil - } - q.appData = q.appData[1:] + r.other = r.other[1:] return f } @@ -137,25 +117,23 @@ func (q *retransmissionQueue) DropPackets(encLevel protocol.EncryptionLevel) { switch encLevel { case protocol.EncryptionInitial: q.initial = nil - q.initialCryptoData = nil case protocol.EncryptionHandshake: q.handshake = nil - q.handshakeCryptoData = nil default: panic(fmt.Sprintf("unexpected encryption level: %s", encLevel)) } } -func (q *retransmissionQueue) InitialAckHandler() ackhandler.FrameHandler { - return (*retransmissionQueueInitialAckHandler)(q) -} - -func (q *retransmissionQueue) HandshakeAckHandler() ackhandler.FrameHandler { - return (*retransmissionQueueHandshakeAckHandler)(q) -} - -func (q *retransmissionQueue) AppDataAckHandler() ackhandler.FrameHandler { - return (*retransmissionQueueAppDataAckHandler)(q) +func (q *retransmissionQueue) AckHandler(encLevel protocol.EncryptionLevel) ackhandler.FrameHandler { + switch encLevel { + case protocol.EncryptionInitial: + return (*retransmissionQueueInitialAckHandler)(q) + case protocol.EncryptionHandshake: + return (*retransmissionQueueHandshakeAckHandler)(q) + case protocol.Encryption0RTT, protocol.Encryption1RTT: + return (*retransmissionQueueAppDataAckHandler)(q) + } + return nil } type retransmissionQueueInitialAckHandler retransmissionQueue diff --git a/vendor/github.com/quic-go/quic-go/send_conn.go b/vendor/github.com/quic-go/quic-go/send_conn.go index 498ed112..402520c6 100644 --- a/vendor/github.com/quic-go/quic-go/send_conn.go +++ b/vendor/github.com/quic-go/quic-go/send_conn.go @@ -2,6 +2,7 @@ package quic import ( "net" + "sync/atomic" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/utils" @@ -10,22 +11,29 @@ import ( // A sendConn allows sending using a simple Write() on a non-connected packet conn. type sendConn interface { Write(b []byte, gsoSize uint16, ecn protocol.ECN) error + WriteTo([]byte, net.Addr) error Close() error LocalAddr() net.Addr RemoteAddr() net.Addr + ChangeRemoteAddr(addr net.Addr, info packetInfo) capabilities() connCapabilities } +type remoteAddrInfo struct { + addr net.Addr + oob []byte +} + type sconn struct { rawConn - localAddr net.Addr - remoteAddr net.Addr + localAddr net.Addr + + remoteAddrInfo atomic.Pointer[remoteAddrInfo] logger utils.Logger - packetInfoOOB []byte // If GSO enabled, and we receive a GSO error for this remote address, GSO is disabled. 
gotGSOError bool // Used to catch the error sometimes returned by the first sendmsg call on Linux, @@ -49,22 +57,26 @@ func newSendConn(c rawConn, remote net.Addr, info packetInfo, logger utils.Logge // increase oob slice capacity, so we can add the UDP_SEGMENT and ECN control messages without allocating l := len(oob) oob = append(oob, make([]byte, 64)...)[:l] - return &sconn{ - rawConn: c, - localAddr: localAddr, - remoteAddr: remote, - packetInfoOOB: oob, - logger: logger, + sc := &sconn{ + rawConn: c, + localAddr: localAddr, + logger: logger, } + sc.remoteAddrInfo.Store(&remoteAddrInfo{ + addr: remote, + oob: oob, + }) + return sc } func (c *sconn) Write(p []byte, gsoSize uint16, ecn protocol.ECN) error { - err := c.writePacket(p, c.remoteAddr, c.packetInfoOOB, gsoSize, ecn) + ai := c.remoteAddrInfo.Load() + err := c.writePacket(p, ai.addr, ai.oob, gsoSize, ecn) if err != nil && isGSOError(err) { // disable GSO for future calls c.gotGSOError = true if c.logger.Debug() { - c.logger.Debugf("GSO failed when sending to %s", c.remoteAddr) + c.logger.Debugf("GSO failed when sending to %s", ai.addr) } // send out the packets one by one for len(p) > 0 { @@ -72,7 +84,7 @@ func (c *sconn) Write(p []byte, gsoSize uint16, ecn protocol.ECN) error { if l > int(gsoSize) { l = int(gsoSize) } - if err := c.writePacket(p[:l], c.remoteAddr, c.packetInfoOOB, 0, ecn); err != nil { + if err := c.writePacket(p[:l], ai.addr, ai.oob, 0, ecn); err != nil { return err } p = p[l:] @@ -91,6 +103,11 @@ func (c *sconn) writePacket(p []byte, addr net.Addr, oob []byte, gsoSize uint16, return err } +func (c *sconn) WriteTo(b []byte, addr net.Addr) error { + _, err := c.WritePacket(b, addr, nil, 0, protocol.ECNUnsupported) + return err +} + func (c *sconn) capabilities() connCapabilities { capabilities := c.rawConn.capabilities() if capabilities.GSO { @@ -99,5 +116,12 @@ func (c *sconn) capabilities() connCapabilities { return capabilities } -func (c *sconn) RemoteAddr() net.Addr { return c.remoteAddr } +func (c *sconn) ChangeRemoteAddr(addr net.Addr, info packetInfo) { + c.remoteAddrInfo.Store(&remoteAddrInfo{ + addr: addr, + oob: info.OOB(), + }) +} + +func (c *sconn) RemoteAddr() net.Addr { return c.remoteAddrInfo.Load().addr } func (c *sconn) LocalAddr() net.Addr { return c.localAddr } diff --git a/vendor/github.com/quic-go/quic-go/send_queue.go b/vendor/github.com/quic-go/quic-go/send_queue.go index bde02334..d19762be 100644 --- a/vendor/github.com/quic-go/quic-go/send_queue.go +++ b/vendor/github.com/quic-go/quic-go/send_queue.go @@ -1,9 +1,14 @@ package quic -import "github.com/quic-go/quic-go/internal/protocol" +import ( + "net" + + "github.com/quic-go/quic-go/internal/protocol" +) type sender interface { Send(p *packetBuffer, gsoSize uint16, ecn protocol.ECN) + SendProbe(*packetBuffer, net.Addr) Run() error WouldBlock() bool Available() <-chan struct{} @@ -57,6 +62,10 @@ func (h *sendQueue) Send(p *packetBuffer, gsoSize uint16, ecn protocol.ECN) { } } +func (h *sendQueue) SendProbe(p *packetBuffer, addr net.Addr) { + h.conn.WriteTo(p.Data, addr) +} + func (h *sendQueue) WouldBlock() bool { return len(h.queue) == sendQueueCapacity } diff --git a/vendor/github.com/quic-go/quic-go/send_stream.go b/vendor/github.com/quic-go/quic-go/send_stream.go index 1df97db2..a588cc8a 100644 --- a/vendor/github.com/quic-go/quic-go/send_stream.go +++ b/vendor/github.com/quic-go/quic-go/send_stream.go @@ -18,7 +18,7 @@ type sendStreamI interface { SendStream handleStopSendingFrame(*wire.StopSendingFrame) hasData() bool - 
popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (frame ackhandler.StreamFrame, ok, hasMore bool) + popStreamFrame(protocol.ByteCount, protocol.Version) (_ ackhandler.StreamFrame, _ *wire.StreamDataBlockedFrame, hasMore bool) closeForShutdown(error) updateSendWindow(protocol.ByteCount) } @@ -26,7 +26,7 @@ type sendStreamI interface { type sendStream struct { mutex sync.Mutex - numOutstandingFrames int64 + numOutstandingFrames int64 // outstanding STREAM and RESET_STREAM frames retransmissionQueue []*wire.StreamFrame ctx context.Context @@ -37,8 +37,10 @@ type sendStream struct { writeOffset protocol.ByteCount - cancelWriteErr error - closeForShutdownErr error + // finalError is the error that is returned by Write. + // It can either be a cancellation error or the shutdown error. + finalError error + queuedResetStreamFrame *wire.ResetStreamFrame finishedWriting bool // set once Close() is called finSent bool // set when a STREAM_FRAME with FIN bit has been sent @@ -46,6 +48,8 @@ type sendStream struct { // This can happen because the application called CancelWrite, // or because Write returned the error (for remote cancellations). cancellationFlagged bool + cancelled bool // both local and remote cancellations + closedForShutdown bool // set by closeForShutdown completed bool // set when this stream has been reported to the streamSender as completed dataForWriting []byte // during a Write() call, this slice is the part of p that still needs to be sent out @@ -59,8 +63,9 @@ type sendStream struct { } var ( - _ SendStream = &sendStream{} - _ sendStreamI = &sendStream{} + _ SendStream = &sendStream{} + _ sendStreamI = &sendStream{} + _ streamControlFrameGetter = &sendStream{} ) func newSendStream( @@ -102,16 +107,15 @@ func (s *sendStream) write(p []byte) (bool /* is newly completed */, int, error) s.mutex.Lock() defer s.mutex.Unlock() + if s.finalError != nil { + if s.cancelled { + s.cancellationFlagged = true + } + return s.isNewlyCompleted(), 0, s.finalError + } if s.finishedWriting { return false, 0, fmt.Errorf("write on closed stream %d", s.streamID) } - if s.cancelWriteErr != nil { - s.cancellationFlagged = true - return s.isNewlyCompleted(), 0, s.cancelWriteErr - } - if s.closeForShutdownErr != nil { - return false, 0, s.closeForShutdownErr - } if !s.deadline.IsZero() && !time.Now().Before(s.deadline) { return false, 0, errDeadline } @@ -165,14 +169,14 @@ func (s *sendStream) write(p []byte) (bool /* is newly completed */, int, error) } deadlineTimer.Reset(deadline) } - if s.dataForWriting == nil || s.cancelWriteErr != nil || s.closeForShutdownErr != nil { + if s.dataForWriting == nil || s.finalError != nil { break } } s.mutex.Unlock() if !notifiedSender { - s.sender.onHasStreamData(s.streamID) // must be called without holding the mutex + s.sender.onHasStreamData(s.streamID, s) // must be called without holding the mutex notifiedSender = true } if copied { @@ -194,11 +198,11 @@ func (s *sendStream) write(p []byte) (bool /* is newly completed */, int, error) if bytesWritten == len(p) { return false, bytesWritten, nil } - if s.closeForShutdownErr != nil { - return false, bytesWritten, s.closeForShutdownErr - } else if s.cancelWriteErr != nil { - s.cancellationFlagged = true - return s.isNewlyCompleted(), bytesWritten, s.cancelWriteErr + if s.finalError != nil { + if s.cancelled { + s.cancellationFlagged = true + } + return s.isNewlyCompleted(), bytesWritten, s.finalError } return false, bytesWritten, nil } @@ -213,37 +217,37 @@ func (s *sendStream) canBufferStreamFrame() 
bool { // popStreamFrame returns the next STREAM frame that is supposed to be sent on this stream // maxBytes is the maximum length this frame (including frame header) will have. -func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (af ackhandler.StreamFrame, ok, hasMore bool) { +func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (_ ackhandler.StreamFrame, _ *wire.StreamDataBlockedFrame, hasMore bool) { s.mutex.Lock() - f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes, v) + f, blocked, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes, v) if f != nil { s.numOutstandingFrames++ } s.mutex.Unlock() if f == nil { - return ackhandler.StreamFrame{}, false, hasMoreData + return ackhandler.StreamFrame{}, blocked, hasMoreData } return ackhandler.StreamFrame{ Frame: f, Handler: (*sendStreamAckHandler)(s), - }, true, hasMoreData + }, blocked, hasMoreData } -func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (*wire.StreamFrame, bool /* has more data to send */) { - if s.cancelWriteErr != nil || s.closeForShutdownErr != nil { - return nil, false +func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (_ *wire.StreamFrame, _ *wire.StreamDataBlockedFrame, hasMoreData bool) { + if s.finalError != nil { + return nil, nil, false } if len(s.retransmissionQueue) > 0 { f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes, v) if f != nil || hasMoreRetransmissions { if f == nil { - return nil, true + return nil, nil, true } // We always claim that we have more data to send. // This might be incorrect, in which case there'll be a spurious call to popStreamFrame in the future. - return f, true + return f, nil, true } } @@ -255,41 +259,45 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun Offset: s.writeOffset, DataLenPresent: true, Fin: true, - }, false + }, nil, false } - return nil, false + return nil, nil, false } sendWindow := s.flowController.SendWindowSize() if sendWindow == 0 { - if isBlocked, offset := s.flowController.IsNewlyBlocked(); isBlocked { - s.sender.queueControlFrame(&wire.StreamDataBlockedFrame{ - StreamID: s.streamID, - MaximumStreamData: offset, - }) - return nil, false - } - return nil, true + return nil, nil, true } f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow, v) - if dataLen := f.DataLen(); dataLen > 0 { + if f == nil { + return nil, nil, hasMoreData + } + if f.DataLen() > 0 { s.writeOffset += f.DataLen() s.flowController.AddBytesSent(f.DataLen()) } + var blocked *wire.StreamDataBlockedFrame + // If the entire send window is used, the stream might have become blocked on stream-level flow control. + // This is not guaranteed though, because the stream might also have been blocked on connection-level flow control. 
+ if f.DataLen() == sendWindow && s.flowController.IsNewlyBlocked() { + blocked = &wire.StreamDataBlockedFrame{StreamID: s.streamID, MaximumStreamData: s.writeOffset} + } f.Fin = s.finishedWriting && s.dataForWriting == nil && s.nextFrame == nil && !s.finSent if f.Fin { s.finSent = true } - return f, hasMoreData + return f, blocked, hasMoreData } func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount, v protocol.Version) (*wire.StreamFrame, bool) { if s.nextFrame != nil { + maxDataLen := min(sendWindow, s.nextFrame.MaxDataLen(maxBytes, v)) + if maxDataLen == 0 { + return nil, true + } nextFrame := s.nextFrame s.nextFrame = nil - - maxDataLen := min(sendWindow, nextFrame.MaxDataLen(maxBytes, v)) if nextFrame.DataLen() > maxDataLen { s.nextFrame = wire.GetStreamFrame() s.nextFrame.StreamID = s.streamID @@ -367,7 +375,7 @@ func (s *sendStream) isNewlyCompleted() bool { return false } // We need to keep the stream around until all frames have been sent and acknowledged. - if s.numOutstandingFrames > 0 || len(s.retransmissionQueue) > 0 { + if s.numOutstandingFrames > 0 || len(s.retransmissionQueue) > 0 || s.queuedResetStreamFrame != nil { return false } // The stream is completed if we sent the FIN. @@ -379,8 +387,8 @@ func (s *sendStream) isNewlyCompleted() bool { // 1. the application called CancelWrite, or // 2. we received a STOP_SENDING, and // * the application consumed the error via Write, or - // * the application called CLsoe - if s.cancelWriteErr != nil && (s.cancellationFlagged || s.finishedWriting) { + // * the application called Close + if s.cancelled && (s.cancellationFlagged || s.finishedWriting) { s.completed = true return true } @@ -389,13 +397,13 @@ func (s *sendStream) isNewlyCompleted() bool { func (s *sendStream) Close() error { s.mutex.Lock() - if s.closeForShutdownErr != nil { + if s.closedForShutdown || s.finishedWriting { s.mutex.Unlock() return nil } s.finishedWriting = true - cancelWriteErr := s.cancelWriteErr - if cancelWriteErr != nil { + cancelled := s.cancelled + if cancelled { s.cancellationFlagged = true } completed := s.isNewlyCompleted() @@ -404,44 +412,60 @@ func (s *sendStream) Close() error { if completed { s.sender.onStreamCompleted(s.streamID) } - if cancelWriteErr != nil { + if cancelled { return fmt.Errorf("close called for canceled stream %d", s.streamID) } - s.sender.onHasStreamData(s.streamID) // need to send the FIN, must be called without holding the mutex + s.sender.onHasStreamData(s.streamID, s) // need to send the FIN, must be called without holding the mutex s.ctxCancel(nil) return nil } func (s *sendStream) CancelWrite(errorCode StreamErrorCode) { - s.cancelWriteImpl(errorCode, false) + s.cancelWrite(errorCode, false) } -func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, remote bool) { +// cancelWrite cancels the stream +// It is possible to cancel a stream after it has been closed, both locally and remotely. +// This is useful to prevent the retransmission of outstanding stream data. +func (s *sendStream) cancelWrite(errorCode qerr.StreamErrorCode, remote bool) { s.mutex.Lock() - if !remote { - s.cancellationFlagged = true - } - if s.cancelWriteErr != nil { + if s.closedForShutdown { s.mutex.Unlock() return } - s.cancelWriteErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote} - s.ctxCancel(s.cancelWriteErr) + if !remote { + s.cancellationFlagged = true + if s.cancelled { + completed := s.isNewlyCompleted() + s.mutex.Unlock() + // The user has called CancelWrite. 
If the previous cancellation was + // because of a STOP_SENDING, we don't need to flag the error to the + // user anymore. + if completed { + s.sender.onStreamCompleted(s.streamID) + } + return + } + } + if s.cancelled { + s.mutex.Unlock() + return + } + s.cancelled = true + s.finalError = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote} + s.ctxCancel(s.finalError) s.numOutstandingFrames = 0 s.retransmissionQueue = nil - newlyCompleted := s.isNewlyCompleted() - s.mutex.Unlock() - - s.signalWrite() - s.sender.queueControlFrame(&wire.ResetStreamFrame{ + s.queuedResetStreamFrame = &wire.ResetStreamFrame{ StreamID: s.streamID, FinalSize: s.writeOffset, ErrorCode: errorCode, - }) - if newlyCompleted { - s.sender.onStreamCompleted(s.streamID) } + s.mutex.Unlock() + + s.signalWrite() + s.sender.onHasStreamControlFrame(s.streamID, s) } func (s *sendStream) updateSendWindow(limit protocol.ByteCount) { @@ -453,12 +477,28 @@ func (s *sendStream) updateSendWindow(limit protocol.ByteCount) { hasStreamData := s.dataForWriting != nil || s.nextFrame != nil s.mutex.Unlock() if hasStreamData { - s.sender.onHasStreamData(s.streamID) + s.sender.onHasStreamData(s.streamID, s) } } func (s *sendStream) handleStopSendingFrame(frame *wire.StopSendingFrame) { - s.cancelWriteImpl(frame.ErrorCode, true) + s.cancelWrite(frame.ErrorCode, true) +} + +func (s *sendStream) getControlFrame(time.Time) (_ ackhandler.Frame, ok, hasMore bool) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.queuedResetStreamFrame == nil { + return ackhandler.Frame{}, false, false + } + s.numOutstandingFrames++ + f := ackhandler.Frame{ + Frame: s.queuedResetStreamFrame, + Handler: (*sendStreamResetStreamHandler)(s), + } + s.queuedResetStreamFrame = nil + return f, true, false } func (s *sendStream) Context() context.Context { @@ -478,7 +518,10 @@ func (s *sendStream) SetWriteDeadline(t time.Time) error { // The peer will NOT be informed about this: the stream is closed without sending a FIN or RST. 
func (s *sendStream) closeForShutdown(err error) { s.mutex.Lock() - s.closeForShutdownErr = err + s.closedForShutdown = true + if s.finalError == nil && !s.finishedWriting { + s.finalError = err + } s.mutex.Unlock() s.signalWrite() } @@ -499,7 +542,7 @@ func (s *sendStreamAckHandler) OnAcked(f wire.Frame) { sf := f.(*wire.StreamFrame) sf.PutBack() s.mutex.Lock() - if s.cancelWriteErr != nil { + if s.cancelled { s.mutex.Unlock() return } @@ -507,10 +550,10 @@ func (s *sendStreamAckHandler) OnAcked(f wire.Frame) { if s.numOutstandingFrames < 0 { panic("numOutStandingFrames negative") } - newlyCompleted := (*sendStream)(s).isNewlyCompleted() + completed := (*sendStream)(s).isNewlyCompleted() s.mutex.Unlock() - if newlyCompleted { + if completed { s.sender.onStreamCompleted(s.streamID) } } @@ -518,7 +561,7 @@ func (s *sendStreamAckHandler) OnAcked(f wire.Frame) { func (s *sendStreamAckHandler) OnLost(f wire.Frame) { sf := f.(*wire.StreamFrame) s.mutex.Lock() - if s.cancelWriteErr != nil { + if s.cancelled { s.mutex.Unlock() return } @@ -530,5 +573,31 @@ func (s *sendStreamAckHandler) OnLost(f wire.Frame) { } s.mutex.Unlock() - s.sender.onHasStreamData(s.streamID) + s.sender.onHasStreamData(s.streamID, (*sendStream)(s)) +} + +type sendStreamResetStreamHandler sendStream + +var _ ackhandler.FrameHandler = &sendStreamResetStreamHandler{} + +func (s *sendStreamResetStreamHandler) OnAcked(wire.Frame) { + s.mutex.Lock() + s.numOutstandingFrames-- + if s.numOutstandingFrames < 0 { + panic("numOutStandingFrames negative") + } + completed := (*sendStream)(s).isNewlyCompleted() + s.mutex.Unlock() + + if completed { + s.sender.onStreamCompleted(s.streamID) + } +} + +func (s *sendStreamResetStreamHandler) OnLost(f wire.Frame) { + s.mutex.Lock() + s.queuedResetStreamFrame = f.(*wire.ResetStreamFrame) + s.numOutstandingFrames-- + s.mutex.Unlock() + s.sender.onHasStreamControlFrame(s.streamID, (*sendStream)(s)) } diff --git a/vendor/github.com/quic-go/quic-go/server.go b/vendor/github.com/quic-go/quic-go/server.go index a55bdd43..3eb8e1d4 100644 --- a/vendor/github.com/quic-go/quic-go/server.go +++ b/vendor/github.com/quic-go/quic-go/server.go @@ -17,8 +17,13 @@ import ( "github.com/quic-go/quic-go/logging" ) -// ErrServerClosed is returned by the Listener or EarlyListener's Accept method after a call to Close. -var ErrServerClosed = errors.New("quic: server closed") +// ErrServerClosed is returned by the [Listener] or [EarlyListener]'s Accept method after a call to Close. 
+var ErrServerClosed = errServerClosed{} + +type errServerClosed struct{} + +func (errServerClosed) Error() string { return "quic: server closed" } +func (errServerClosed) Unwrap() error { return net.ErrClosed } // packetHandler handles packets type packetHandler interface { @@ -56,6 +61,7 @@ type rejectedPacket struct { // A Listener of QUIC type baseServer struct { + tr *Transport disableVersionNegotiation bool acceptEarlyConns bool @@ -67,9 +73,9 @@ type baseServer struct { tokenGenerator *handshake.TokenGenerator maxTokenAge time.Duration - connIDGenerator ConnectionIDGenerator - connHandler packetHandlerManager - onClose func() + connIDGenerator ConnectionIDGenerator + statelessResetter *statelessResetter + onClose func() receivedPackets chan receivedPacket @@ -83,14 +89,14 @@ type baseServer struct { context.Context, context.CancelCauseFunc, sendConn, - connRunner, + *Transport, protocol.ConnectionID, /* original dest connection ID */ *protocol.ConnectionID, /* retry src connection ID */ protocol.ConnectionID, /* client dest connection ID */ protocol.ConnectionID, /* destination connection ID */ protocol.ConnectionID, /* source connection ID */ ConnectionIDGenerator, - protocol.StatelessResetToken, + *statelessResetter, *Config, *tls.Config, *handshake.TokenGenerator, @@ -100,15 +106,24 @@ type baseServer struct { protocol.Version, ) quicConn - closeMx sync.Mutex - errorChan chan struct{} // is closed when the server is closed - closeErr error - running chan struct{} // closed as soon as run() returns + closeMx sync.Mutex + // errorChan is closed when Close is called. This has two effects: + // 1. it cancels handshakes that are still in flight (using CONNECTION_REFUSED) errors + // 2. it stops handling of packets passed to this server + errorChan chan struct{} + // acceptChan is closed when Close returns. + // This only happens once all handshake in flight have either completed and canceled. + // Calls to Accept will first drain the queue of connections that have completed the handshake, + // and then return ErrServerClosed. + stopAccepting chan struct{} + closeErr error + running chan struct{} // closed as soon as run() returns versionNegotiationQueue chan receivedPacket invalidTokenQueue chan rejectedPacket connectionRefusedQueue chan rejectedPacket retryQueue chan rejectedPacket + handshakingCount sync.WaitGroup verifySourceAddress func(net.Addr) bool @@ -131,11 +146,11 @@ func (l *Listener) Accept(ctx context.Context) (Connection, error) { } // Close closes the listener. -// Accept will return ErrServerClosed as soon as all connections in the accept queue have been accepted. +// Accept will return [ErrServerClosed] as soon as all connections in the accept queue have been accepted. // QUIC handshakes that are still in flight will be rejected with a CONNECTION_REFUSED error. // The effect of closing the listener depends on how it was created: -// * if it was created using Transport.Listen, already established connections will be unaffected -// * if it was created using the Listen convenience method, all established connection will be closed immediately +// - if it was created using [Transport.Listen], already established connections will be unaffected +// - if it was created using the [Listen] convenience method, all established connection will be closed immediately func (l *Listener) Close() error { return l.baseServer.Close() } @@ -171,7 +186,7 @@ func (l *EarlyListener) Addr() net.Addr { } // ListenAddr creates a QUIC server listening on a given address. 
-// See Listen for more details. +// See [Listen] for more details. func ListenAddr(addr string, tlsConf *tls.Config, config *Config) (*Listener, error) { conn, err := listenUDP(addr) if err != nil { @@ -184,7 +199,7 @@ func ListenAddr(addr string, tlsConf *tls.Config, config *Config) (*Listener, er }).Listen(tlsConf, config) } -// ListenAddrEarly works like ListenAddr, but it returns connections before the handshake completes. +// ListenAddrEarly works like [ListenAddr], but it returns connections before the handshake completes. func ListenAddrEarly(addr string, tlsConf *tls.Config, config *Config) (*EarlyListener, error) { conn, err := listenUDP(addr) if err != nil { @@ -206,16 +221,16 @@ func listenUDP(addr string) (*net.UDPConn, error) { } // Listen listens for QUIC connections on a given net.PacketConn. -// If the PacketConn satisfies the OOBCapablePacketConn interface (as a net.UDPConn does), +// If the PacketConn satisfies the [OOBCapablePacketConn] interface (as a [net.UDPConn] does), // ECN and packet info support will be enabled. In this case, ReadMsgUDP and WriteMsgUDP // will be used instead of ReadFrom and WriteTo to read/write packets. // A single net.PacketConn can only be used for a single call to Listen. // // The tls.Config must not be nil and must contain a certificate configuration. -// Furthermore, it must define an application control (using NextProtos). +// Furthermore, it must define an application control (using [NextProtos]). // The quic.Config may be nil, in that case the default values will be used. // -// This is a convenience function. More advanced use cases should instantiate a Transport, +// This is a convenience function. More advanced use cases should instantiate a [Transport], // which offers configuration options for a more fine-grained control of the connection establishment, // including reusing the underlying UDP socket for outgoing QUIC connections. // When closing a listener created with Listen, all established QUIC connections will be closed immediately. @@ -224,7 +239,7 @@ func Listen(conn net.PacketConn, tlsConf *tls.Config, config *Config) (*Listener return tr.Listen(tlsConf, config) } -// ListenEarly works like Listen, but it returns connections before the handshake completes. +// ListenEarly works like [Listen], but it returns connections before the handshake completes. 
func ListenEarly(conn net.PacketConn, tlsConf *tls.Config, config *Config) (*EarlyListener, error) { tr := &Transport{Conn: conn, isSingleUse: true} return tr.ListenEarly(tlsConf, config) @@ -232,8 +247,9 @@ func ListenEarly(conn net.PacketConn, tlsConf *tls.Config, config *Config) (*Ear func newServer( conn rawConn, - connHandler packetHandlerManager, + tr *Transport, connIDGenerator ConnectionIDGenerator, + statelessResetter *statelessResetter, connContext func(context.Context) context.Context, tlsConf *tls.Config, config *Config, @@ -248,15 +264,17 @@ func newServer( s := &baseServer{ conn: conn, connContext: connContext, + tr: tr, tlsConf: tlsConf, config: config, tokenGenerator: handshake.NewTokenGenerator(tokenGeneratorKey), maxTokenAge: maxTokenAge, verifySourceAddress: verifySourceAddress, connIDGenerator: connIDGenerator, - connHandler: connHandler, + statelessResetter: statelessResetter, connQueue: make(chan quicConn, protocol.MaxAcceptQueueSize), errorChan: make(chan struct{}), + stopAccepting: make(chan struct{}), running: make(chan struct{}), receivedPackets: make(chan receivedPacket, protocol.MaxServerUnprocessedPackets), versionNegotiationQueue: make(chan receivedPacket, 4), @@ -327,7 +345,13 @@ func (s *baseServer) accept(ctx context.Context) (quicConn, error) { return nil, ctx.Err() case conn := <-s.connQueue: return conn, nil - case <-s.errorChan: + case <-s.stopAccepting: + // first drain the queue + select { + case conn := <-s.connQueue: + return conn, nil + default: + } return nil, s.closeErr } } @@ -351,6 +375,9 @@ func (s *baseServer) close(e error, notifyOnClose bool) { if notifyOnClose { s.onClose() } + // wait until all handshakes in flight have terminated + s.handshakingCount.Wait() + close(s.stopAccepting) } // Addr returns the server's network address @@ -361,6 +388,8 @@ func (s *baseServer) Addr() net.Addr { func (s *baseServer) handlePacket(p receivedPacket) { select { case s.receivedPackets <- p: + case <-s.errorChan: + return default: s.logger.Debugf("Dropping packet from %s (%d bytes). Server receive queue full.", p.remoteAddr, p.Size()) if s.tracer != nil && s.tracer.DroppedPacket != nil { @@ -397,6 +426,9 @@ func (s *baseServer) handlePacketImpl(p receivedPacket) bool /* is the buffer st // send a Version Negotiation Packet if the client is speaking a different protocol version if !protocol.IsSupportedVersion(s.config.Versions, v) { if s.disableVersionNegotiation { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedVersion) + } return false } @@ -469,7 +501,7 @@ func (s *baseServer) handle0RTTPacket(p receivedPacket) bool { } // check again if we might have a connection now - if handler, ok := s.connHandler.Get(connID); ok { + if handler, ok := s.tr.connRunner().Get(connID); ok { handler.handlePacket(p) return true } @@ -559,7 +591,7 @@ func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error // The server queues packets for a while, and we might already have established a connection by now. // This results in a second check in the connection map. // That's ok since it's not the hot path (it's only taken by some Initial and 0-RTT packets). 
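The drain logic in accept() above (serve connections that already completed the handshake first, only then report closure) is easy to get wrong. A stripped-down, library-independent illustration of the same pattern:

package main

import (
    "errors"
    "fmt"
)

var errClosed = errors.New("closed")

type acceptor struct {
    queue  chan int      // completed items waiting to be accepted
    closed chan struct{} // closed once shutdown has finished
}

func (a *acceptor) accept() (int, error) {
    select {
    case v := <-a.queue:
        return v, nil
    case <-a.closed:
        // first drain anything that completed before the close
        select {
        case v := <-a.queue:
            return v, nil
        default:
        }
        return 0, errClosed
    }
}

func main() {
    a := &acceptor{queue: make(chan int, 2), closed: make(chan struct{})}
    a.queue <- 1
    close(a.closed)
    fmt.Println(a.accept()) // 1 <nil>
    fmt.Println(a.accept()) // 0 closed
}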
- if handler, ok := s.connHandler.Get(hdr.DestConnectionID); ok { + if handler, ok := s.tr.connRunner().Get(hdr.DestConnectionID); ok { handler.handlePacket(p) return nil } @@ -617,7 +649,7 @@ func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error config := s.config if s.config.GetConfigForClient != nil { - conf, err := s.config.GetConfigForClient(&ClientHelloInfo{ + conf, err := s.config.GetConfigForClient(&ClientInfo{ RemoteAddr: p.remoteAddr, AddrVerified: clientAddrVerified, }) @@ -674,14 +706,14 @@ func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error ctx, cancel, newSendConn(s.conn, p.remoteAddr, p.info, s.logger), - s.connHandler, + s.tr, origDestConnID, retrySrcConnID, hdr.DestConnectionID, hdr.SrcConnectionID, connID, s.connIDGenerator, - s.connHandler.GetStatelessResetToken(connID), + s.statelessResetter, config, s.tlsConf, s.tokenGenerator, @@ -695,7 +727,7 @@ func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error // This is very unlikely: Even if an attacker chooses a connection ID that's already in use, // under normal circumstances the packet would just be routed to that connection. // The only time this collision will occur if we receive the two Initial packets at the same time. - if added := s.connHandler.AddWithConnID(hdr.DestConnectionID, connID, conn); !added { + if added := s.tr.connRunner().AddWithConnID(hdr.DestConnectionID, connID, conn); !added { delete(s.zeroRTTQueues, hdr.DestConnectionID) conn.closeWithTransportError(qerr.ConnectionRefused) return nil @@ -708,43 +740,42 @@ func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error delete(s.zeroRTTQueues, hdr.DestConnectionID) } - go conn.run() + s.handshakingCount.Add(1) go func() { - if completed := s.handleNewConn(conn); !completed { - return - } - - select { - case s.connQueue <- conn: - default: - conn.closeWithTransportError(ConnectionRefused) - } + defer s.handshakingCount.Done() + s.handleNewConn(conn) }() + go conn.run() return nil } -func (s *baseServer) handleNewConn(conn quicConn) bool { +func (s *baseServer) handleNewConn(conn quicConn) { if s.acceptEarlyConns { // wait until the early connection is ready, the handshake fails, or the server is closed select { case <-s.errorChan: conn.closeWithTransportError(ConnectionRefused) - return false + return case <-conn.Context().Done(): - return false + return case <-conn.earlyConnReady(): - return true + } + } else { + // wait until the handshake completes, fails, or the server is closed + select { + case <-s.errorChan: + conn.closeWithTransportError(ConnectionRefused) + return + case <-conn.Context().Done(): + return + case <-conn.HandshakeComplete(): } } - // wait until the handshake completes, fails, or the server is closed + select { - case <-s.errorChan: + case s.connQueue <- conn: + default: conn.closeWithTransportError(ConnectionRefused) - return false - case <-conn.Context().Done(): - return false - case <-conn.HandshakeComplete(): - return true } } @@ -803,7 +834,7 @@ func (s *baseServer) maybeSendInvalidToken(p rejectedPacket) { hdr := p.hdr sealer, opener := handshake.NewInitialAEAD(hdr.DestConnectionID, protocol.PerspectiveServer, hdr.Version) data := p.data[:hdr.ParsedLen()+hdr.Length] - extHdr, err := unpackLongHeader(opener, hdr, data, hdr.Version) + extHdr, err := unpackLongHeader(opener, hdr, data) // Only send INVALID_TOKEN if we can unprotect the packet. // This makes sure that we won't send it for packets that were corrupted. 
if err != nil { diff --git a/vendor/github.com/quic-go/quic-go/stateless_reset.go b/vendor/github.com/quic-go/quic-go/stateless_reset.go new file mode 100644 index 00000000..cd0059a5 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/stateless_reset.go @@ -0,0 +1,42 @@ +package quic + +import ( + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "hash" + "sync" + + "github.com/quic-go/quic-go/internal/protocol" +) + +type statelessResetter struct { + mx sync.Mutex + h hash.Hash +} + +// newStatelessRetter creates a new stateless reset generator. +// It is valid to use a nil key. In that case, a random key will be used. +// This makes is impossible for on-path attackers to shut down established connections. +func newStatelessResetter(key *StatelessResetKey) *statelessResetter { + var h hash.Hash + if key != nil { + h = hmac.New(sha256.New, key[:]) + } else { + b := make([]byte, 32) + _, _ = rand.Read(b) + h = hmac.New(sha256.New, b) + } + return &statelessResetter{h: h} +} + +func (r *statelessResetter) GetStatelessResetToken(connID protocol.ConnectionID) protocol.StatelessResetToken { + r.mx.Lock() + defer r.mx.Unlock() + + var token protocol.StatelessResetToken + r.h.Write(connID.Bytes()) + copy(token[:], r.h.Sum(nil)) + r.h.Reset() + return token +} diff --git a/vendor/github.com/quic-go/quic-go/stream.go b/vendor/github.com/quic-go/quic-go/stream.go index d6c809ad..9cd2695d 100644 --- a/vendor/github.com/quic-go/quic-go/stream.go +++ b/vendor/github.com/quic-go/quic-go/stream.go @@ -24,8 +24,9 @@ var errDeadline net.Error = &deadlineError{} // The streamSender is notified by the stream about various events. type streamSender interface { - queueControlFrame(wire.Frame) - onHasStreamData(protocol.StreamID) + onHasConnectionData() + onHasStreamData(protocol.StreamID, sendStreamI) + onHasStreamControlFrame(protocol.StreamID, streamControlFrameGetter) // must be called without holding the mutex that is acquired by closeForShutdown onStreamCompleted(protocol.StreamID) } @@ -34,19 +35,16 @@ type streamSender interface { // This is necessary in order to keep track when both halves have been completed. 
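The new stateless_reset.go derives reset tokens with HMAC-SHA256 over the connection ID. A self-contained sketch of that derivation with made-up key and connection-ID values; it shows the mapping is deterministic, which is what lets a restarted server reset connections it no longer has state for. (The library reuses a single hash.Hash under a mutex; creating a fresh HMAC per call gives the same result.)

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "fmt"
)

// statelessResetToken computes HMAC-SHA256(key, connID) truncated to the
// 16-byte QUIC stateless reset token size.
func statelessResetToken(key, connID []byte) [16]byte {
    mac := hmac.New(sha256.New, key)
    mac.Write(connID)
    var token [16]byte
    copy(token[:], mac.Sum(nil))
    return token
}

func main() {
    key := []byte("example-32-byte-stateless-key!!!") // arbitrary example key
    connID := []byte{0xde, 0xad, 0xbe, 0xef}          // arbitrary example connection ID
    fmt.Printf("%x\n", statelessResetToken(key, connID))
    // Same key and connection ID always yield the same token.
    fmt.Println(statelessResetToken(key, connID) == statelessResetToken(key, connID)) // true
}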
type uniStreamSender struct { streamSender - onStreamCompletedImpl func() + onStreamCompletedImpl func() + onHasStreamControlFrameImpl func(protocol.StreamID, streamControlFrameGetter) } -func (s *uniStreamSender) queueControlFrame(f wire.Frame) { - s.streamSender.queueControlFrame(f) +func (s *uniStreamSender) onHasStreamData(id protocol.StreamID, str sendStreamI) { + s.streamSender.onHasStreamData(id, str) } - -func (s *uniStreamSender) onHasStreamData(id protocol.StreamID) { - s.streamSender.onHasStreamData(id) -} - -func (s *uniStreamSender) onStreamCompleted(protocol.StreamID) { - s.onStreamCompletedImpl() +func (s *uniStreamSender) onStreamCompleted(protocol.StreamID) { s.onStreamCompletedImpl() } +func (s *uniStreamSender) onHasStreamControlFrame(id protocol.StreamID, str streamControlFrameGetter) { + s.onHasStreamControlFrameImpl(id, str) } var _ streamSender = &uniStreamSender{} @@ -55,13 +53,12 @@ type streamI interface { Stream closeForShutdown(error) // for receiving - handleStreamFrame(*wire.StreamFrame) error - handleResetStreamFrame(*wire.ResetStreamFrame) error - getWindowUpdate() protocol.ByteCount + handleStreamFrame(*wire.StreamFrame, time.Time) error + handleResetStreamFrame(*wire.ResetStreamFrame, time.Time) error // for sending hasData() bool handleStopSendingFrame(*wire.StopSendingFrame) - popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (ackhandler.StreamFrame, bool, bool) + popStreamFrame(protocol.ByteCount, protocol.Version) (_ ackhandler.StreamFrame, _ *wire.StreamDataBlockedFrame, hasMore bool) updateSendWindow(protocol.ByteCount) } @@ -83,7 +80,10 @@ type stream struct { sendStreamCompleted bool } -var _ Stream = &stream{} +var ( + _ Stream = &stream{} + _ streamControlFrameGetter = &receiveStream{} +) // newStream creates a new Stream func newStream( @@ -101,6 +101,9 @@ func newStream( s.checkIfCompleted() s.completedMutex.Unlock() }, + onHasStreamControlFrameImpl: func(id protocol.StreamID, str streamControlFrameGetter) { + sender.onHasStreamControlFrame(streamID, s) + }, } s.sendStream = *newSendStream(ctx, streamID, senderForSendStream, flowController) senderForReceiveStream := &uniStreamSender{ @@ -111,6 +114,9 @@ func newStream( s.checkIfCompleted() s.completedMutex.Unlock() }, + onHasStreamControlFrameImpl: func(id protocol.StreamID, str streamControlFrameGetter) { + sender.onHasStreamControlFrame(streamID, s) + }, } s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController) return s @@ -126,6 +132,14 @@ func (s *stream) Close() error { return s.sendStream.Close() } +func (s *stream) getControlFrame(now time.Time) (_ ackhandler.Frame, ok, hasMore bool) { + f, ok, _ := s.sendStream.getControlFrame(now) + if ok { + return f, true, true + } + return s.receiveStream.getControlFrame(now) +} + func (s *stream) SetDeadline(t time.Time) error { _ = s.SetReadDeadline(t) // SetReadDeadline never errors _ = s.SetWriteDeadline(t) // SetWriteDeadline never errors diff --git a/vendor/github.com/quic-go/quic-go/streams_map.go b/vendor/github.com/quic-go/quic-go/streams_map.go index 0ba23b25..510b3e59 100644 --- a/vendor/github.com/quic-go/quic-go/streams_map.go +++ b/vendor/github.com/quic-go/quic-go/streams_map.go @@ -2,9 +2,7 @@ package quic import ( "context" - "errors" "fmt" - "net" "sync" "github.com/quic-go/quic-go/internal/flowcontrol" @@ -34,15 +32,12 @@ func convertStreamError(err error, stype protocol.StreamType, pers protocol.Pers return fmt.Errorf(strError.Error(), ids...) 
} -type streamOpenErr struct{ error } +// StreamLimitReachedError is returned from Connection.OpenStream and Connection.OpenUniStream +// when it is not possible to open a new stream because the number of opens streams reached +// the peer's stream limit. +type StreamLimitReachedError struct{} -var _ net.Error = &streamOpenErr{} - -func (e streamOpenErr) Temporary() bool { return e.error == errTooManyOpenStreams } -func (streamOpenErr) Timeout() bool { return false } - -// errTooManyOpenStreams is used internally by the outgoing streams maps. -var errTooManyOpenStreams = errors.New("too many open streams") +func (e StreamLimitReachedError) Error() string { return "too many open streams" } type streamsMap struct { ctx context.Context // not used for cancellations, but carries the values associated with the connection @@ -52,6 +47,7 @@ type streamsMap struct { maxIncomingUniStreams uint64 sender streamSender + queueControlFrame func(wire.Frame) newFlowController func(protocol.StreamID) flowcontrol.StreamFlowController mutex sync.Mutex @@ -67,14 +63,16 @@ var _ streamManager = &streamsMap{} func newStreamsMap( ctx context.Context, sender streamSender, + queueControlFrame func(wire.Frame), newFlowController func(protocol.StreamID) flowcontrol.StreamFlowController, maxIncomingBidiStreams uint64, maxIncomingUniStreams uint64, perspective protocol.Perspective, -) streamManager { +) *streamsMap { m := &streamsMap{ ctx: ctx, perspective: perspective, + queueControlFrame: queueControlFrame, newFlowController: newFlowController, maxIncomingBidiStreams: maxIncomingBidiStreams, maxIncomingUniStreams: maxIncomingUniStreams, @@ -91,7 +89,7 @@ func (m *streamsMap) initMaps() { id := num.StreamID(protocol.StreamTypeBidi, m.perspective) return newStream(m.ctx, id, m.sender, m.newFlowController(id)) }, - m.sender.queueControlFrame, + m.queueControlFrame, ) m.incomingBidiStreams = newIncomingStreamsMap( protocol.StreamTypeBidi, @@ -100,7 +98,7 @@ func (m *streamsMap) initMaps() { return newStream(m.ctx, id, m.sender, m.newFlowController(id)) }, m.maxIncomingBidiStreams, - m.sender.queueControlFrame, + m.queueControlFrame, ) m.outgoingUniStreams = newOutgoingStreamsMap( protocol.StreamTypeUni, @@ -108,7 +106,7 @@ func (m *streamsMap) initMaps() { id := num.StreamID(protocol.StreamTypeUni, m.perspective) return newSendStream(m.ctx, id, m.sender, m.newFlowController(id)) }, - m.sender.queueControlFrame, + m.queueControlFrame, ) m.incomingUniStreams = newIncomingStreamsMap( protocol.StreamTypeUni, @@ -117,7 +115,7 @@ func (m *streamsMap) initMaps() { return newReceiveStream(id, m.sender, m.newFlowController(id)) }, m.maxIncomingUniStreams, - m.sender.queueControlFrame, + m.queueControlFrame, ) } diff --git a/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go b/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go index fd45f4e7..d4c18259 100644 --- a/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go +++ b/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go @@ -2,6 +2,7 @@ package quic import ( "context" + "slices" "sync" "github.com/quic-go/quic-go/internal/protocol" @@ -19,9 +20,7 @@ type outgoingStreamsMap[T outgoingStream] struct { streamType protocol.StreamType streams map[protocol.StreamNum]T - openQueue map[uint64]chan struct{} - lowestInQueue uint64 - highestInQueue uint64 + openQueue []chan struct{} nextStream protocol.StreamNum // stream ID of the stream returned by OpenStream(Sync) maxStream protocol.StreamNum // the maximum stream ID we're allowed to open @@ -41,7 +40,6 @@ func 
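StreamLimitReachedError above replaces the old net.Error-style streamOpenErr. A hedged sketch, not from the patch, of how a caller might branch on it, assuming the exported type and the Connection methods it is documented to come from:

package example

import (
    "context"
    "errors"
    "log"

    "github.com/quic-go/quic-go"
)

// openOrWait tries a non-blocking open first and falls back to the blocking
// variant only when the peer's stream limit is the reason for the failure.
func openOrWait(ctx context.Context, conn quic.Connection) (quic.Stream, error) {
    str, err := conn.OpenStream()
    if err == nil {
        return str, nil
    }
    var limitErr *quic.StreamLimitReachedError
    if errors.As(err, &limitErr) {
        // The peer's stream limit is exhausted; block until a MAX_STREAMS frame arrives.
        log.Println("stream limit reached, falling back to OpenStreamSync")
        return conn.OpenStreamSync(ctx)
    }
    return nil, err
}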
newOutgoingStreamsMap[T outgoingStream]( return &outgoingStreamsMap[T]{ streamType: streamType, streams: make(map[protocol.StreamNum]T), - openQueue: make(map[uint64]chan struct{}), maxStream: protocol.InvalidStreamNum, nextStream: 1, newStream: newStream, @@ -60,7 +58,7 @@ func (m *outgoingStreamsMap[T]) OpenStream() (T, error) { // if there are OpenStreamSync calls waiting, return an error here if len(m.openQueue) > 0 || m.nextStream > m.maxStream { m.maybeSendBlockedFrame() - return *new(T), streamOpenErr{errTooManyOpenStreams} + return *new(T), &StreamLimitReachedError{} } return m.openStream(), nil } @@ -72,22 +70,15 @@ func (m *outgoingStreamsMap[T]) OpenStreamSync(ctx context.Context) (T, error) { if m.closeErr != nil { return *new(T), m.closeErr } - if err := ctx.Err(); err != nil { return *new(T), err } - if len(m.openQueue) == 0 && m.nextStream <= m.maxStream { return m.openStream(), nil } waitChan := make(chan struct{}, 1) - queuePos := m.highestInQueue - m.highestInQueue++ - if len(m.openQueue) == 0 { - m.lowestInQueue = queuePos - } - m.openQueue[queuePos] = waitChan + m.openQueue = append(m.openQueue, waitChan) m.maybeSendBlockedFrame() for { @@ -95,12 +86,17 @@ func (m *outgoingStreamsMap[T]) OpenStreamSync(ctx context.Context) (T, error) { select { case <-ctx.Done(): m.mutex.Lock() - delete(m.openQueue, queuePos) + m.openQueue = slices.DeleteFunc(m.openQueue, func(c chan struct{}) bool { + return c == waitChan + }) + // If we just received a MAX_STREAMS frame, this might have been the next stream + // that could be opened. Make sure we unblock the next OpenStreamSync call. + m.maybeUnblockOpenSync() return *new(T), ctx.Err() case <-waitChan: } - m.mutex.Lock() + m.mutex.Lock() if m.closeErr != nil { return *new(T), m.closeErr } @@ -109,9 +105,8 @@ func (m *outgoingStreamsMap[T]) OpenStreamSync(ctx context.Context) (T, error) { continue } str := m.openStream() - delete(m.openQueue, queuePos) - m.lowestInQueue = queuePos + 1 - m.unblockOpenSync() + m.openQueue = m.openQueue[1:] + m.maybeUnblockOpenSync() return str, nil } } @@ -181,7 +176,7 @@ func (m *outgoingStreamsMap[T]) SetMaxStream(num protocol.StreamNum) { if m.maxStream < m.nextStream-1+protocol.StreamNum(len(m.openQueue)) { m.maybeSendBlockedFrame() } - m.unblockOpenSync() + m.maybeUnblockOpenSync() } // UpdateSendWindow is called when the peer's transport parameters are received. @@ -196,27 +191,25 @@ func (m *outgoingStreamsMap[T]) UpdateSendWindow(limit protocol.ByteCount) { } // unblockOpenSync unblocks the next OpenStreamSync go-routine to open a new stream -func (m *outgoingStreamsMap[T]) unblockOpenSync() { +func (m *outgoingStreamsMap[T]) maybeUnblockOpenSync() { if len(m.openQueue) == 0 { return } - for qp := m.lowestInQueue; qp <= m.highestInQueue; qp++ { - c, ok := m.openQueue[qp] - if !ok { // entry was deleted because the context was canceled - continue - } - // unblockOpenSync is called both from OpenStreamSync and from SetMaxStream. - // It's sufficient to only unblock OpenStreamSync once. - select { - case c <- struct{}{}: - default: - } + if m.nextStream > m.maxStream { return } + // unblockOpenSync is called both from OpenStreamSync and from SetMaxStream. + // It's sufficient to only unblock OpenStreamSync once. 
+ select { + case m.openQueue[0] <- struct{}{}: + default: + } } func (m *outgoingStreamsMap[T]) CloseWithError(err error) { m.mutex.Lock() + defer m.mutex.Unlock() + m.closeErr = err for _, str := range m.streams { str.closeForShutdown(err) @@ -226,5 +219,5 @@ func (m *outgoingStreamsMap[T]) CloseWithError(err error) { close(c) } } - m.mutex.Unlock() + m.openQueue = nil } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn.go b/vendor/github.com/quic-go/quic-go/sys_conn.go index 71cc4607..8159a146 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn.go @@ -14,7 +14,7 @@ import ( ) // OOBCapablePacketConn is a connection that allows the reading of ECN bits from the IP header. -// If the PacketConn passed to Dial or Listen satisfies this interface, quic-go will use it. +// If the PacketConn passed to the [Transport] satisfies this interface, quic-go will use it. // In this case, ReadMsgUDP() will be used instead of ReadFrom() to read packets. type OOBCapablePacketConn interface { net.PacketConn @@ -58,8 +58,8 @@ func wrapConn(pc net.PacketConn) (rawConn, error) { return nil, err } + // only set DF on UDP sockets if _, ok := pc.LocalAddr().(*net.UDPAddr); ok { - // Only set DF on sockets that we expect to be able to handle that configuration. var err error supportsDF, err = setDF(rawConn) if err != nil { @@ -92,7 +92,7 @@ func (c *basicConn) ReadPacket() (receivedPacket, error) { // The packet size should not exceed protocol.MaxPacketBufferSize bytes // If it does, we only read a truncated packet, which will then end up undecryptable buffer.Data = buffer.Data[:protocol.MaxPacketBufferSize] - n, addr, err := c.PacketConn.ReadFrom(buffer.Data) + n, addr, err := c.ReadFrom(buffer.Data) if err != nil { return receivedPacket{}, err } @@ -111,7 +111,7 @@ func (c *basicConn) WritePacket(b []byte, addr net.Addr, _ []byte, gsoSize uint1 if ecn != protocol.ECNUnsupported { panic("cannot use ECN with a basicConn") } - return c.PacketConn.WriteTo(b, addr) + return c.WriteTo(b, addr) } func (c *basicConn) capabilities() connCapabilities { return connCapabilities{DF: c.supportsDF} } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go index b51cd8f1..8ed273ee 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go @@ -4,47 +4,67 @@ package quic import ( "errors" + "fmt" "strconv" "strings" "syscall" "golang.org/x/sys/unix" +) - "github.com/quic-go/quic-go/internal/utils" +// for macOS versions, see https://en.wikipedia.org/wiki/Darwin_(operating_system)#Darwin_20_onwards +const ( + macOSVersion11 = 20 + macOSVersion15 = 24 ) func setDF(rawConn syscall.RawConn) (bool, error) { - // Setting DF bit is only supported from macOS11 + // Setting DF bit is only supported from macOS 11. 
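The slice-based openQueue above wakes only the head waiter, using a capacity-1 channel and a non-blocking send so a duplicate signal is absorbed rather than blocking the signaller. A reduced, standalone illustration of that wake-up discipline (not the library code):

package main

import "fmt"

type waitQueue struct {
    waiters []chan struct{}
}

func (q *waitQueue) enqueue() chan struct{} {
    c := make(chan struct{}, 1) // capacity 1: a pending signal is never lost
    q.waiters = append(q.waiters, c)
    return c
}

func (q *waitQueue) signalHead() {
    if len(q.waiters) == 0 {
        return
    }
    select {
    case q.waiters[0] <- struct{}{}:
    default: // head was already signalled; nothing to do
    }
}

func (q *waitQueue) pop() { q.waiters = q.waiters[1:] }

func main() {
    var q waitQueue
    first := q.enqueue()
    _ = q.enqueue()
    q.signalHead()
    q.signalHead() // duplicate signal absorbed by the default case
    <-first        // the first waiter proceeds
    q.pop()
    fmt.Println("remaining waiters:", len(q.waiters)) // 1
}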
// https://github.com/chromium/chromium/blob/117.0.5881.2/net/socket/udp_socket_posix.cc#L555 - if supportsDF, err := isAtLeastMacOS11(); !supportsDF || err != nil { + version, err := getMacOSVersion() + if err != nil || version < macOSVersion11 { return false, err } - // Enabling IP_DONTFRAG will force the kernel to return "sendto: message too long" - // and the datagram will not be fragmented - var errDFIPv4, errDFIPv6 error + var controlErr error + var disableDF bool if err := rawConn.Control(func(fd uintptr) { - errDFIPv4 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_DONTFRAG, 1) - errDFIPv6 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_DONTFRAG, 1) + addr, err := unix.Getsockname(int(fd)) + if err != nil { + controlErr = fmt.Errorf("getsockname: %w", err) + return + } + + // Dual-stack sockets are effectively IPv6 sockets (with IPV6_ONLY set to 0). + // On macOS, the DF bit on dual-stack sockets is controlled by the IPV6_DONTFRAG option. + // See https://datatracker.ietf.org/doc/draft-seemann-tsvwg-udp-fragmentation/ for details. + switch addr.(type) { + case *unix.SockaddrInet4: + controlErr = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_DONTFRAG, 1) + case *unix.SockaddrInet6: + controlErr = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_DONTFRAG, 1) + + // Setting the DF bit on dual-stack sockets works since macOS Sequoia. + // Disable DF on dual-stack sockets before Sequoia. + if version < macOSVersion15 { + // check if this is a dual-stack socket by reading the IPV6_V6ONLY flag + v6only, err := unix.GetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_V6ONLY) + if err != nil { + controlErr = fmt.Errorf("getting IPV6_V6ONLY: %w", err) + return + } + disableDF = v6only == 0 + } + default: + controlErr = fmt.Errorf("unknown address type: %T", addr) + } }); err != nil { return false, err } - switch { - case errDFIPv4 == nil && errDFIPv6 == nil: - utils.DefaultLogger.Debugf("Setting DF for IPv4 and IPv6.") - case errDFIPv4 == nil && errDFIPv6 != nil: - utils.DefaultLogger.Debugf("Setting DF for IPv4.") - case errDFIPv4 != nil && errDFIPv6 == nil: - utils.DefaultLogger.Debugf("Setting DF for IPv6.") - // On macOS, the syscall for setting DF bit for IPv4 fails on dual-stack listeners. - // Treat the connection as not having DF enabled, even though the DF bit will be set - // when used for IPv6. - // See https://github.com/quic-go/quic-go/issues/3793 for details. 
- return false, nil - case errDFIPv4 != nil && errDFIPv6 != nil: - return false, errors.New("setting DF failed for both IPv4 and IPv6") + if controlErr != nil { + return false, controlErr } - return true, nil + return !disableDF, nil } func isSendMsgSizeErr(err error) bool { @@ -53,22 +73,20 @@ func isSendMsgSizeErr(err error) bool { func isRecvMsgSizeErr(error) bool { return false } -func isAtLeastMacOS11() (bool, error) { +func getMacOSVersion() (int, error) { uname := &unix.Utsname{} - err := unix.Uname(uname) - if err != nil { - return false, err + if err := unix.Uname(uname); err != nil { + return 0, err } release := string(uname.Release[:]) - if idx := strings.Index(release, "."); idx != -1 { - version, err := strconv.Atoi(release[:idx]) - if err != nil { - return false, err - } - // Darwin version 20 is macOS version 11 - // https://en.wikipedia.org/wiki/Darwin_(operating_system)#Darwin_20_onwards - return version >= 20, nil + idx := strings.Index(release, ".") + if idx == -1 { + return 0, nil } - return false, nil + version, err := strconv.Atoi(release[:idx]) + if err != nil { + return 0, err + } + return version, nil } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go index f09eaa5d..b09a2394 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go @@ -16,8 +16,8 @@ func setDF(rawConn syscall.RawConn) (bool, error) { // and the datagram will not be fragmented var errDFIPv4, errDFIPv6 error if err := rawConn.Control(func(fd uintptr) { - errDFIPv4 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_MTU_DISCOVER, unix.IP_PMTUDISC_DO) - errDFIPv6 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_MTU_DISCOVER, unix.IPV6_PMTUDISC_DO) + errDFIPv4 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_MTU_DISCOVER, unix.IP_PMTUDISC_PROBE) + errDFIPv6 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_MTU_DISCOVER, unix.IPV6_PMTUDISC_PROBE) }); err != nil { return false, err } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go index e27635ec..4c140f00 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go @@ -12,21 +12,19 @@ import ( ) const ( - // IP_DONTFRAGMENT controls the Don't Fragment (DF) bit. - // - // It's the same code point for both IPv4 and IPv6 on Windows. 
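getMacOSVersion above keys the DF behaviour off the Darwin major version parsed from uname's release string (Darwin 20 corresponds to macOS 11, Darwin 24 to macOS 15). A tiny standalone sketch of that parsing step with example release strings:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// darwinMajor extracts the number before the first dot of a Darwin release
// string, mirroring the version check in the patch above.
func darwinMajor(release string) (int, error) {
    idx := strings.Index(release, ".")
    if idx == -1 {
        return 0, nil // unknown format: treat as version 0, i.e. no DF support
    }
    return strconv.Atoi(release[:idx])
}

func main() {
    for _, r := range []string{"20.6.0", "24.0.0", "garbage"} {
        v, err := darwinMajor(r)
        fmt.Println(r, "->", v, err)
    }
}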
- // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IP_DONTFRAG.html - // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IPV6_DONTFRAG.html - // + // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IP_DONTFRAGMENT.html //nolint:stylecheck IP_DONTFRAGMENT = 14 + // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IPV6_DONTFRAG.html + //nolint:stylecheck + IPV6_DONTFRAG = 14 ) func setDF(rawConn syscall.RawConn) (bool, error) { var errDFIPv4, errDFIPv6 error if err := rawConn.Control(func(fd uintptr) { errDFIPv4 = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IP, IP_DONTFRAGMENT, 1) - errDFIPv6 = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IPV6, IP_DONTFRAGMENT, 1) + errDFIPv6 = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IPV6, IPV6_DONTFRAG, 1) }); err != nil { return false, err } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_oob.go b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go index a6795ca2..5ed1b656 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn_oob.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go @@ -83,7 +83,7 @@ func newConn(c OOBCapablePacketConn, supportsDF bool) (*oobConn, error) { if err != nil { return nil, err } - needsPacketInfo := false + var needsPacketInfo bool if udpAddr, ok := c.LocalAddr().(*net.UDPAddr); ok && udpAddr.IP.IsUnspecified() { needsPacketInfo = true } @@ -257,7 +257,7 @@ func (c *oobConn) WritePacket(b []byte, addr net.Addr, packetInfoOOB []byte, gso } } } - n, _, err := c.OOBCapablePacketConn.WriteMsgUDP(b, oob, addr.(*net.UDPAddr)) + n, _, err := c.WriteMsgUDP(b, oob, addr.(*net.UDPAddr)) return n, err } diff --git a/vendor/github.com/quic-go/quic-go/transport.go b/vendor/github.com/quic-go/quic-go/transport.go index 059f30f5..a7775fb1 100644 --- a/vendor/github.com/quic-go/quic-go/transport.go +++ b/vendor/github.com/quic-go/quic-go/transport.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "crypto/tls" "errors" + "fmt" "net" "sync" "sync/atomic" @@ -16,6 +17,31 @@ import ( "github.com/quic-go/quic-go/logging" ) +// ErrTransportClosed is returned by the [Transport]'s Listen or Dial method after it was closed. +var ErrTransportClosed = &errTransportClosed{} + +type errTransportClosed struct { + err error +} + +func (e *errTransportClosed) Unwrap() []error { return []error{net.ErrClosed, e.err} } + +func (e *errTransportClosed) Error() string { + if e.err == nil { + return "quic: transport closed" + } + return fmt.Sprintf("quic: transport closed: %s", e.err) +} + +func (e *errTransportClosed) Is(target error) bool { + _, ok := target.(*errTransportClosed) + return ok +} + +type transportID uint64 + +var transportIDCounter atomic.Uint64 + var errListenerAlreadySet = errors.New("listener already set") // The Transport is the central point to manage incoming and outgoing QUIC connections. @@ -23,7 +49,7 @@ var errListenerAlreadySet = errors.New("listener already set") // This means that a single UDP socket can be used for listening for incoming connections, as well as // for dialing an arbitrary number of outgoing connections. // A Transport handles a single net.PacketConn, and offers a range of configuration options -// compared to the simple helper functions like Listen and Dial that this package provides. +// compared to the simple helper functions like [Listen] and [Dial] that this package provides. 
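The transport.go hunk above introduces ErrTransportClosed, which unwraps to net.ErrClosed. A hedged sketch, not part of the patch, of checking it after closing a Transport; the dial target and TLS settings are placeholders.

package main

import (
    "context"
    "crypto/tls"
    "errors"
    "fmt"
    "net"

    "github.com/quic-go/quic-go"
)

func main() {
    udpConn, err := net.ListenUDP("udp", &net.UDPAddr{})
    if err != nil {
        panic(err)
    }
    tr := &quic.Transport{Conn: udpConn}
    _ = tr.Close()

    // Dial after Close is documented above to fail with ErrTransportClosed.
    remote := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 4242}
    _, err = tr.Dial(context.Background(), remote,
        &tls.Config{NextProtos: []string{"example"}, InsecureSkipVerify: true}, nil)
    fmt.Println(errors.Is(err, quic.ErrTransportClosed)) // true
    fmt.Println(errors.Is(err, net.ErrClosed))           // true, via Unwrap
}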
type Transport struct { // A single net.PacketConn can only be handled by one Transport. // Bad things will happen if passed to multiple Transports. @@ -111,11 +137,13 @@ type Transport struct { initErr error // Set in init. + transportID transportID // If no ConnectionIDGenerator is set, this is the ConnectionIDLength. connIDLen int // Set in init. // If no ConnectionIDGenerator is set, this is set to a default. - connIDGenerator ConnectionIDGenerator + connIDGenerator ConnectionIDGenerator + statelessResetter *statelessResetter server *baseServer @@ -125,7 +153,7 @@ type Transport struct { statelessResetQueue chan receivedPacket listening chan struct{} // is closed when listen returns - closed bool + closeErr error createdConn bool isSingleUse bool // was created for a single server or client, i.e. by calling quic.Listen or quic.Dial @@ -137,7 +165,7 @@ type Transport struct { // Listen starts listening for incoming QUIC connections. // There can only be a single listener on any net.PacketConn. -// Listen may only be called again after the current Listener was closed. +// Listen may only be called again after the current listener was closed. func (t *Transport) Listen(tlsConf *tls.Config, conf *Config) (*Listener, error) { s, err := t.createServer(tlsConf, conf, false) if err != nil { @@ -148,7 +176,7 @@ func (t *Transport) Listen(tlsConf *tls.Config, conf *Config) (*Listener, error) // ListenEarly starts listening for incoming QUIC connections. // There can only be a single listener on any net.PacketConn. -// Listen may only be called again after the current Listener was closed. +// ListenEarly may only be called again after the current listener was closed. func (t *Transport) ListenEarly(tlsConf *tls.Config, conf *Config) (*EarlyListener, error) { s, err := t.createServer(tlsConf, conf, true) if err != nil { @@ -168,6 +196,9 @@ func (t *Transport) createServer(tlsConf *tls.Config, conf *Config, allow0RTT bo t.mutex.Lock() defer t.mutex.Unlock() + if t.closeErr != nil { + return nil, t.closeErr + } if t.server != nil { return nil, errListenerAlreadySet } @@ -175,17 +206,22 @@ func (t *Transport) createServer(tlsConf *tls.Config, conf *Config, allow0RTT bo if err := t.init(false); err != nil { return nil, err } + maxTokenAge := t.MaxTokenAge + if maxTokenAge == 0 { + maxTokenAge = 24 * time.Hour + } s := newServer( t.conn, - t.handlerMap, + t, t.connIDGenerator, + t.statelessResetter, t.ConnContext, tlsConf, conf, t.Tracer, t.closeServer, *t.TokenGeneratorKey, - t.MaxTokenAge, + maxTokenAge, t.VerifySourceAddress, t.DisableVersionNegotiationPackets, allow0RTT, @@ -205,24 +241,142 @@ func (t *Transport) DialEarly(ctx context.Context, addr net.Addr, tlsConf *tls.C } func (t *Transport) dial(ctx context.Context, addr net.Addr, host string, tlsConf *tls.Config, conf *Config, use0RTT bool) (EarlyConnection, error) { + if err := t.init(t.isSingleUse); err != nil { + return nil, err + } if err := validateConfig(conf); err != nil { return nil, err } conf = populateConfig(conf) - if err := t.init(t.isSingleUse); err != nil { - return nil, err - } - var onClose func() - if t.isSingleUse { - onClose = func() { t.Close() } - } tlsConf = tlsConf.Clone() setTLSConfigServerName(tlsConf, addr, host) - return dial(ctx, newSendConn(t.conn, addr, packetInfo{}, utils.DefaultLogger), t.connIDGenerator, t.handlerMap, tlsConf, conf, onClose, use0RTT) + return t.doDial(ctx, + newSendConn(t.conn, addr, packetInfo{}, utils.DefaultLogger), + tlsConf, + conf, + 0, + false, + use0RTT, + conf.Versions[0], + ) +} + 
+func (t *Transport) doDial( + ctx context.Context, + sendConn sendConn, + tlsConf *tls.Config, + config *Config, + initialPacketNumber protocol.PacketNumber, + hasNegotiatedVersion bool, + use0RTT bool, + version protocol.Version, +) (quicConn, error) { + srcConnID, err := t.connIDGenerator.GenerateConnectionID() + if err != nil { + return nil, err + } + destConnID, err := generateConnectionIDForInitial() + if err != nil { + return nil, err + } + + tracingID := nextConnTracingID() + ctx = context.WithValue(ctx, ConnectionTracingKey, tracingID) + + t.mutex.Lock() + if t.closeErr != nil { + t.mutex.Unlock() + return nil, t.closeErr + } + + var tracer *logging.ConnectionTracer + if config.Tracer != nil { + tracer = config.Tracer(ctx, protocol.PerspectiveClient, destConnID) + } + if tracer != nil && tracer.StartedConnection != nil { + tracer.StartedConnection(sendConn.LocalAddr(), sendConn.RemoteAddr(), srcConnID, destConnID) + } + + logger := utils.DefaultLogger.WithPrefix("client") + logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", tlsConf.ServerName, sendConn.LocalAddr(), sendConn.RemoteAddr(), srcConnID, destConnID, version) + + conn := newClientConnection( + context.WithoutCancel(ctx), + sendConn, + t, + destConnID, + srcConnID, + t.connIDGenerator, + t.statelessResetter, + config, + tlsConf, + initialPacketNumber, + use0RTT, + hasNegotiatedVersion, + tracer, + logger, + version, + ) + t.handlerMap.Add(srcConnID, conn) + t.mutex.Unlock() + + // The error channel needs to be buffered, as the run loop will continue running + // after doDial returns (if the handshake is successful). + errChan := make(chan error, 1) + recreateChan := make(chan errCloseForRecreating) + go func() { + err := conn.run() + var recreateErr *errCloseForRecreating + if errors.As(err, &recreateErr) { + recreateChan <- *recreateErr + return + } + if t.isSingleUse { + t.Close() + } + errChan <- err + }() + + // Only set when we're using 0-RTT. + // Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever. 
+ var earlyConnChan <-chan struct{} + if use0RTT { + earlyConnChan = conn.earlyConnReady() + } + + select { + case <-ctx.Done(): + conn.destroy(nil) + // wait until the Go routine that called Connection.run() returns + select { + case <-errChan: + case <-recreateChan: + } + return nil, context.Cause(ctx) + case params := <-recreateChan: + return t.doDial(ctx, + sendConn, + tlsConf, + config, + params.nextPacketNumber, + true, + use0RTT, + params.nextVersion, + ) + case err := <-errChan: + return nil, err + case <-earlyConnChan: + // ready to send 0-RTT data + return conn, nil + case <-conn.HandshakeComplete(): + // handshake successfully completed + return conn, nil + } } func (t *Transport) init(allowZeroLengthConnIDs bool) error { t.initOnce.Do(func() { + t.transportID = transportID(transportIDCounter.Add(1)) var conn rawConn if c, ok := t.Conn.(rawConn); ok { conn = c @@ -237,7 +391,9 @@ func (t *Transport) init(allowZeroLengthConnIDs bool) error { t.logger = utils.DefaultLogger // TODO: make this configurable t.conn = conn - t.handlerMap = newPacketHandlerMap(t.StatelessResetKey, t.enqueueClosePacket, t.logger) + if t.handlerMap == nil { // allows mocking the handlerMap in tests + t.handlerMap = newPacketHandlerMap(t.enqueueClosePacket, t.logger) + } t.listening = make(chan struct{}) t.closeQueue = make(chan closePacket, 4) @@ -262,14 +418,20 @@ func (t *Transport) init(allowZeroLengthConnIDs bool) error { t.connIDLen = connIDLen t.connIDGenerator = &protocol.DefaultConnectionIDGenerator{ConnLen: t.connIDLen} } + t.statelessResetter = newStatelessResetter(t.StatelessResetKey) - getMultiplexer().AddConn(t.Conn) go t.listen(conn) go t.runSendQueue() }) return t.initErr } +func (t *Transport) connRunner() packetHandlerManager { + return t.handlerMap +} + +func (t *Transport) id() transportID { return t.transportID } + // WriteTo sends a packet on the underlying connection. func (t *Transport) WriteTo(b []byte, addr net.Addr) (int, error) { if err := t.init(false); err != nil { @@ -300,11 +462,14 @@ func (t *Transport) runSendQueue() { } } -// Close closes the underlying connection. +// Close stops listening for UDP datagrams on the Transport.Conn. // If any listener was started, it will be closed as well. // It is invalid to start new listeners or connections after that. 
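doDial above relies on a buffered error channel so the connection's run loop can always deliver its result and exit, even when the dialling context is already done. A reduced illustration of that pattern outside the library:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

func dialLike(ctx context.Context) error {
    errChan := make(chan error, 1) // buffered: the send below never blocks
    go func() {
        time.Sleep(50 * time.Millisecond) // stands in for conn.run()
        errChan <- errors.New("handshake finished after the caller gave up")
    }()

    select {
    case <-ctx.Done():
        // The worker goroutine still terminates cleanly thanks to the buffer.
        return context.Cause(ctx)
    case err := <-errChan:
        return err
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    fmt.Println(dialLike(ctx)) // context deadline exceeded
}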
func (t *Transport) Close() error { - t.close(errors.New("closing")) + // avoid race condition if the transport is currently being initialized + t.init(false) + + t.close(nil) if t.createdConn { if err := t.Conn.Close(); err != nil { return err @@ -323,7 +488,7 @@ func (t *Transport) closeServer() { t.mutex.Lock() t.server = nil if t.isSingleUse { - t.closed = true + t.closeErr = ErrServerClosed } t.mutex.Unlock() if t.createdConn { @@ -339,10 +504,12 @@ func (t *Transport) closeServer() { func (t *Transport) close(e error) { t.mutex.Lock() defer t.mutex.Unlock() - if t.closed { + + if t.closeErr != nil { return } + e = &errTransportClosed{err: e} if t.handlerMap != nil { t.handlerMap.Close(e) } @@ -352,7 +519,7 @@ func (t *Transport) close(e error) { if t.Tracer != nil && t.Tracer.Close != nil { t.Tracer.Close() } - t.closed = true + t.closeErr = e } // only print warnings about the UDP receive buffer size once @@ -360,7 +527,6 @@ var setBufferWarningOnce sync.Once func (t *Transport) listen(conn rawConn) { defer close(t.listening) - defer getMultiplexer().RemoveConn(t.Conn) for { p, err := conn.ReadPacket() @@ -370,7 +536,7 @@ func (t *Transport) listen(conn rawConn) { // See https://github.com/quic-go/quic-go/issues/1737 for details. if nerr, ok := err.(net.Error); ok && nerr.Temporary() { t.mutex.Lock() - closed := t.closed + closed := t.closeErr != nil t.mutex.Unlock() if closed { return @@ -424,7 +590,12 @@ func (t *Transport) handlePacket(p receivedPacket) { return } if !wire.IsLongHeaderPacket(p.data[0]) { - t.maybeSendStatelessReset(p) + if statelessResetQueued := t.maybeSendStatelessReset(p); !statelessResetQueued { + if t.Tracer != nil && t.Tracer.DroppedPacket != nil { + t.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnknownConnectionID) + } + p.buffer.Release() + } return } @@ -432,29 +603,32 @@ func (t *Transport) handlePacket(p receivedPacket) { defer t.mutex.Unlock() if t.server == nil { // no server set t.logger.Debugf("received a packet with an unexpected connection ID %s", connID) + if t.Tracer != nil && t.Tracer.DroppedPacket != nil { + t.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnknownConnectionID) + } + p.buffer.MaybeRelease() return } t.server.handlePacket(p) } -func (t *Transport) maybeSendStatelessReset(p receivedPacket) { +func (t *Transport) maybeSendStatelessReset(p receivedPacket) (statelessResetQueued bool) { if t.StatelessResetKey == nil { - p.buffer.Release() - return + return false } // Don't send a stateless reset in response to very small packets. // This includes packets that could be stateless resets. if len(p.data) <= protocol.MinStatelessResetSize { - p.buffer.Release() - return + return false } select { case t.statelessResetQueue <- p: + return true default: // it's fine to not send a stateless reset when we're busy - p.buffer.Release() + return false } } @@ -466,7 +640,7 @@ func (t *Transport) sendStatelessReset(p receivedPacket) { t.logger.Errorf("error parsing connection ID on packet from %s: %s", p.remoteAddr, err) return } - token := t.handlerMap.GetStatelessResetToken(connID) + token := t.statelessResetter.GetStatelessResetToken(connID) t.logger.Debugf("Sending stateless reset to %s (connection ID: %s). 
Token: %#x", p.remoteAddr, connID, token) data := make([]byte, protocol.MinStatelessResetSize-16, protocol.MinStatelessResetSize) rand.Read(data) @@ -489,7 +663,7 @@ func (t *Transport) maybeHandleStatelessReset(data []byte) bool { token := *(*protocol.StatelessResetToken)(data[len(data)-16:]) if conn, ok := t.handlerMap.GetByResetToken(token); ok { t.logger.Debugf("Received a stateless reset with token %#x. Closing connection.", token) - go conn.destroy(&StatelessResetError{Token: token}) + go conn.destroy(&StatelessResetError{}) return true } return false diff --git a/vendor/github.com/quic-go/quic-go/window_update_queue.go b/vendor/github.com/quic-go/quic-go/window_update_queue.go deleted file mode 100644 index 9ed12143..00000000 --- a/vendor/github.com/quic-go/quic-go/window_update_queue.go +++ /dev/null @@ -1,71 +0,0 @@ -package quic - -import ( - "sync" - - "github.com/quic-go/quic-go/internal/flowcontrol" - "github.com/quic-go/quic-go/internal/protocol" - "github.com/quic-go/quic-go/internal/wire" -) - -type windowUpdateQueue struct { - mutex sync.Mutex - - queue map[protocol.StreamID]struct{} // used as a set - queuedConn bool // connection-level window update - - streamGetter streamGetter - connFlowController flowcontrol.ConnectionFlowController - callback func(wire.Frame) -} - -func newWindowUpdateQueue( - streamGetter streamGetter, - connFC flowcontrol.ConnectionFlowController, - cb func(wire.Frame), -) *windowUpdateQueue { - return &windowUpdateQueue{ - queue: make(map[protocol.StreamID]struct{}), - streamGetter: streamGetter, - connFlowController: connFC, - callback: cb, - } -} - -func (q *windowUpdateQueue) AddStream(id protocol.StreamID) { - q.mutex.Lock() - q.queue[id] = struct{}{} - q.mutex.Unlock() -} - -func (q *windowUpdateQueue) AddConnection() { - q.mutex.Lock() - q.queuedConn = true - q.mutex.Unlock() -} - -func (q *windowUpdateQueue) QueueAll() { - q.mutex.Lock() - // queue a connection-level window update - if q.queuedConn { - q.callback(&wire.MaxDataFrame{MaximumData: q.connFlowController.GetWindowUpdate()}) - q.queuedConn = false - } - // queue all stream-level window updates - for id := range q.queue { - delete(q.queue, id) - str, err := q.streamGetter.GetOrOpenReceiveStream(id) - if err != nil || str == nil { // the stream can be nil if it was completed before dequeing the window update - continue - } - offset := str.getWindowUpdate() - if offset == 0 { // can happen if we received a final offset, right after queueing the window update - continue - } - q.callback(&wire.MaxStreamDataFrame{ - StreamID: id, - MaximumStreamData: offset, - }) - } - q.mutex.Unlock() -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad..7e19eba0 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. 
+type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) 
} // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109..19063416 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd..21629087 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
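For the NotErrorAs helpers added above, a short hedged usage sketch; the wrapped errors are invented for the example.

package example

import (
    "errors"
    "fmt"
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestNotErrorAs(t *testing.T) {
    err := fmt.Errorf("wrapping: %w", errors.New("plain error"))

    var pathErr *os.PathError
    // Passes: no *os.PathError anywhere in the chain.
    assert.NotErrorAs(t, err, &pathErr)
    assert.NotErrorAsf(t, err, &pathErr, "unexpected %T in chain", err)

    // This variant would fail (and set pathErr), because the chain does contain one:
    // assert.NotErrorAs(t, fmt.Errorf("open failed: %w", &os.PathError{Op: "open"}), &pathErr)
}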
// // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a0..1d2f7182 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
} // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f2..4e91332b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) + } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) 
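The Same/NotSame changes in the surrounding hunks now distinguish "arguments are not pointers" from "pointers differ". A minimal sketch, with hypothetical test names, of the observable behaviour:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSameSketch(t *testing.T) {
	a, b := 1, 1
	assert.Same(t, &a, &a)    // passes: identical pointer value and type
	assert.NotSame(t, &a, &b) // passes: two distinct allocations

	// With the samePointers rework in these hunks, a call such as
	// assert.Same(t, 1, 1) now fails with "Both arguments must be pointers"
	// instead of the generic "Not same" message.
}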
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) 
+ } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 00000000..baa0cc7d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
+// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 00000000..b83c6cf6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,37 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom +// +build !testify_yaml_fail,!testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. +// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. +func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 00000000..e78f7dfe --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,18 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default +// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 506a82f8..d8921950 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. 
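For the new assert/yaml indirection above, a caller built with the testify_yaml_custom tag is expected to install its own decoder before YAMLEq runs. A hedged sketch, assuming a hypothetical test package and using encoding/json as a stand-in decoder that only handles JSON-shaped documents:

//go:build testify_yaml_custom

package example_test

import (
	"encoding/json"

	assertYaml "github.com/stretchr/testify/assert/yaml"
)

func init() {
	// Hypothetical substitute decoder: sufficient for JSON-compatible input,
	// and it keeps gopkg.in/yaml.v3 out of the test binary.
	assertYaml.Unmarshal = func(in []byte, out interface{}) error {
		return json.Unmarshal(in, out)
	}
}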
// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// require.Contains(t, "Hello World", "World") +// require.Contains(t, ["Hello", "World"], "World") +// require.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// require.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -120,7 +120,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// require.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -134,7 +134,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// require.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +147,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. 
// -// assert.Equal(t, 123, 123) +// require.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -166,7 +166,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// require.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -181,7 +181,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -200,8 +200,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // Exported int // notExported int // } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -220,8 +220,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m // Exported int // notExported int // } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -232,10 +232,10 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// require.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -246,10 +246,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -262,7 +262,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// require.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -280,8 +280,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) +// if require.Error(t, err) { +// require.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -321,7 +321,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// require.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -336,7 +336,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -374,8 +374,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) +// if require.Errorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -390,7 +390,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -415,10 +415,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// require.EventuallyWithT(t, func(c *require.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -443,10 +443,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -460,7 +460,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -473,7 +473,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// require.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -486,7 +486,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. 
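The EventuallyWithT doc fixes above swap the example's durations so that waitFor precedes tick. A minimal sketch (hypothetical test; timings illustrative) showing that order, with per-tick failures collected through assert.CollectT:

package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestEventuallyWithTSketch(t *testing.T) {
	done := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(done)
	}()

	// Argument order matches the corrected docs: waitFor (2s) before tick (10ms).
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		select {
		case <-done:
		default:
			assert.Fail(c, "still waiting") // fails only this tick, not the test
		}
	}, 2*time.Second, 10*time.Millisecond)
}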
// -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -543,7 +543,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// require.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -556,7 +556,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// require.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -593,9 +593,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// require.Greater(t, 2, 1) +// require.Greater(t, float64(2), float64(1)) +// require.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -608,10 +608,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// require.GreaterOrEqual(t, 2, 1) +// require.GreaterOrEqual(t, 2, 2) +// require.GreaterOrEqual(t, "b", "a") +// require.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -624,10 +624,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -640,9 +640,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// require.Greaterf(t, 2, 1, "error message %s", "formatted") +// require.Greaterf(t, float64(2), float64(1), "error 
message %s", "formatted") +// require.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -656,7 +656,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -672,7 +672,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -688,7 +688,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -704,7 +704,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -719,7 +719,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -734,7 +734,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -749,7 +749,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -764,7 +764,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -779,7 +779,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -794,7 +794,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -809,7 +809,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). 
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -824,7 +824,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -839,7 +839,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// require.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -852,7 +852,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -865,7 +865,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// require.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -922,7 +922,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -979,9 +979,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// require.IsDecreasing(t, []int{2, 1, 0}) +// require.IsDecreasing(t, []float{2, 1}) +// require.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -994,9 +994,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// require.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1009,9 +1009,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// require.IsIncreasing(t, []int{1, 2, 3}) +// require.IsIncreasing(t, []float{1, 2}) +// require.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1024,9 +1024,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// require.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1039,9 +1039,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// require.IsNonDecreasing(t, []int{1, 1, 2}) +// require.IsNonDecreasing(t, []float{1, 2}) +// require.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1054,9 +1054,9 @@ func 
IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1069,9 +1069,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// require.IsNonIncreasing(t, []int{2, 1, 1}) +// require.IsNonIncreasing(t, []float{2, 1}) +// require.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1084,9 +1084,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1121,7 +1121,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEq asserts that two JSON strings are equivalent. // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1134,7 +1134,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1148,7 +1148,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. 
// -// assert.Len(t, mySlice, 3) +// require.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1162,7 +1162,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// require.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1175,9 +1175,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// require.Less(t, 1, 2) +// require.Less(t, float64(1), float64(2)) +// require.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1190,10 +1190,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// require.LessOrEqual(t, 1, 2) +// require.LessOrEqual(t, 2, 2) +// require.LessOrEqual(t, "a", "b") +// require.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1206,10 +1206,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1222,9 +1222,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . 
// Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// require.Lessf(t, 1, 2, "error message %s", "formatted") +// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// require.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1237,8 +1237,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// require.Negative(t, -1) +// require.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1251,8 +1251,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// require.Negativef(t, -1, "error message %s", "formatted") +// require.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1266,7 +1266,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1280,7 +1280,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1293,7 +1293,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// require.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1306,7 +1306,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. 
// -// assert.Nilf(t, err, "error message %s", "formatted") +// require.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1344,8 +1344,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoError(t, err) { +// require.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1360,8 +1360,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoErrorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1400,9 +1400,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// require.NotContains(t, "Hello World", "Earth") +// require.NotContains(t, ["Hello", "World"], "Earth") +// require.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1416,9 +1416,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1429,11 +1429,51 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a t.FailNow() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmpty(t, obj) { +// require.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1448,8 +1488,8 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmptyf(t, obj, "error message %s", "formatted") { +// require.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1463,7 +1503,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// require.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1479,7 +1519,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . 
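The NotElementsMatch/NotElementsMatchf helpers above are new in this vendored testify version. A minimal, hypothetical test showing how they read in practice (the slices and test name are illustrative, not taken from this repository):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestNotElementsMatchSketch(t *testing.T) {
	// Passes: the multisets differ (the first slice has two 1s, the second only one).
	require.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})

	// Would fail and stop the test: same elements with the same counts, only reordered.
	// require.NotElementsMatch(t, []int{3, 2, 1}, []int{1, 2, 3})
}
```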
// NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// require.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1492,7 +1532,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1505,7 +1545,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1519,7 +1559,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1531,7 +1595,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) t.FailNow() } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1545,7 +1609,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotImplements asserts that an object does not implement the specified interface. // -// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +// require.NotImplements(t, (*MyInterface)(nil), new(MyObject)) func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1558,7 +1622,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, // NotImplementsf asserts that an object does not implement the specified interface. 
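NotErrorAs and NotErrorAsf are also new in this update; they are the negation of ErrorAs. A hedged sketch of typical usage (the error values here are made up for illustration):

```go
package example_test

import (
	"errors"
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestNotErrorAsSketch(t *testing.T) {
	err := fmt.Errorf("reading config: %w", errors.New("value out of range"))

	// Passes: nothing in err's chain is an *os.PathError, so errors.As would report false.
	var pathErr *os.PathError
	require.NotErrorAs(t, err, &pathErr)
}
```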
// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1571,7 +1635,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// require.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1584,7 +1648,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// require.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1597,7 +1661,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// require.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1610,7 +1674,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1623,8 +1687,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1637,8 +1701,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1651,7 +1715,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// require.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. 
Pointer variable sameness is // determined based on the equality of both type and value. @@ -1667,7 +1731,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1685,8 +1749,8 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], [1, 2]) +// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1701,8 +1765,8 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1737,7 +1801,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// require.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1752,7 +1816,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1767,7 +1831,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1845,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. 
// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1795,7 +1859,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,7 +1872,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1821,8 +1885,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// require.Positive(t, 1) +// require.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1835,8 +1899,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// require.Positivef(t, 1, "error message %s", "formatted") +// require.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1849,8 +1913,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1863,8 +1927,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. 
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1877,7 +1941,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// require.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1893,7 +1957,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// require.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1910,8 +1974,8 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subset asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], [1, 2]) +// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1925,8 +1989,8 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // Subsetf asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1939,7 +2003,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. // -// assert.True(t, myBool) +// require.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1952,7 +2016,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// require.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1965,7 +2029,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. 
// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1978,7 +2042,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1991,7 +2055,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -2004,7 +2068,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 55e42dde..8b328368 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,4 +1,4 @@ -{{.Comment}} +{{ replace .Comment "assert." "require."}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { if h, ok := t.(tHelper); ok { h.Helper() } if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index eee8310a..1bd87304 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { @@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) 
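The one-line require.go.tmpl change above is what produces the assert. → require. doc-comment rewrites throughout this file: the generator now passes each copied comment through a replace helper. As a rough illustration of the mechanism only — testify's real code generator wires this up itself, and the snippet below merely assumes a text/template func map backed by strings.ReplaceAll:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Sketch of how a "replace" helper can back the template expression
	// {{ replace .Comment "assert." "require." }}; illustration only.
	tmpl := template.Must(template.New("doc").
		Funcs(template.FuncMap{"replace": strings.ReplaceAll}).
		Parse(`{{ replace .Comment "assert." "require." }}`))

	_ = tmpl.Execute(os.Stdout, struct{ Comment string }{
		Comment: "// assert.Len(t, mySlice, 3)",
	})
	// Prints: // require.Len(t, mySlice, 3)
}
```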
} -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { @@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772dfe..6b7ce929 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,7 +6,7 @@ type TestingT interface { FailNow() } -type tHelper interface { +type tHelper = interface { Helper() } diff --git a/vendor/go.uber.org/automaxprocs/.codecov.yml b/vendor/go.uber.org/automaxprocs/.codecov.yml new file mode 100644 index 00000000..9a2ed4a9 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.codecov.yml @@ -0,0 +1,14 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 90% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure diff --git a/vendor/go.uber.org/automaxprocs/.gitignore b/vendor/go.uber.org/automaxprocs/.gitignore new file mode 100644 index 00000000..dd7bcf51 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.gitignore @@ -0,0 +1,33 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log +coverage.txt + +/bin +cover.out +cover.html diff --git 
a/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/vendor/go.uber.org/automaxprocs/CHANGELOG.md new file mode 100644 index 00000000..f421056a --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CHANGELOG.md @@ -0,0 +1,52 @@ +# Changelog + +## v1.6.0 (2024-07-24) + +- Add RoundQuotaFunc option that allows configuration of rounding + behavior for floating point CPU quota. + +## v1.5.3 (2023-07-19) + +- Fix mountinfo parsing when super options have fields with spaces. +- Fix division by zero while parsing cgroups. + +## v1.5.2 (2023-03-16) + +- Support child control cgroups +- Fix file descriptor leak +- Update dependencies + +## v1.5.1 (2022-04-06) + +- Fix cgroups v2 mountpoint detection. + +## v1.5.0 (2022-04-05) + +- Add support for cgroups v2. + +Thanks to @emadolsky for their contribution to this release. + +## v1.4.0 (2021-02-01) + +- Support colons in cgroup names. +- Remove linters from runtime dependencies. + +## v1.3.0 (2020-01-23) + +- Migrate to Go modules. + +## v1.2.0 (2018-02-22) + +- Fixed quota clamping to always round down rather than up; Rather than + guaranteeing constant throttling at saturation, instead assume that the + fractional CPU was added as a hedge for factors outside of Go's scheduler. + +## v1.1.0 (2017-11-10) + +- Log the new value of `GOMAXPROCS` rather than the current value. +- Make logs more explicit about whether `GOMAXPROCS` was modified or not. +- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1. + +## v1.0.0 (2017-08-09) + +- Initial release. diff --git a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..e327d9aa --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md new file mode 100644 index 00000000..2b6a6040 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing + +We'd love your help improving this package! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/automaxprocs.git +cd automaxprocs +git remote add upstream https://github.com/uber-go/automaxprocs.git +git fetch upstream +``` + +Install the test dependencies: + +``` +make dependencies +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. 
+ +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/automaxprocs +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. + +[fork]: https://github.com/uber-go/automaxprocs/fork +[open-issue]: https://github.com/uber-go/automaxprocs/issues/new +[cla]: https://cla-assistant.io/uber-go/automaxprocs +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/automaxprocs/Makefile b/vendor/go.uber.org/automaxprocs/Makefile new file mode 100644 index 00000000..1642b714 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/Makefile @@ -0,0 +1,46 @@ +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck + +.PHONY: build +build: + go build ./... + +.PHONY: install +install: + go mod download + +.PHONY: test +test: + go test -race ./... + +.PHONY: cover +cover: + go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +$(GOLINT): tools/go.mod + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): tools/go.mod + cd tools && go install honnef.co/go/tools/cmd/staticcheck@2023.1.2 + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking gofmt" + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking go vet" + @go vet ./... 2>&1 | tee -a lint.log + @echo "Checking golint" + @$(GOLINT) ./... | tee -a lint.log + @echo "Checking staticcheck" + @$(STATICCHECK) ./... 2>&1 | tee -a lint.log + @echo "Checking for license headers..." + @./.build/check_license.sh | tee -a lint.log + @[ ! -s lint.log ] diff --git a/vendor/go.uber.org/automaxprocs/README.md b/vendor/go.uber.org/automaxprocs/README.md new file mode 100644 index 00000000..bfed32ad --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/README.md @@ -0,0 +1,71 @@ +# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Automatically set `GOMAXPROCS` to match Linux container CPU quota. + +## Installation + +`go get -u go.uber.org/automaxprocs` + +## Quick Start + +```go +import _ "go.uber.org/automaxprocs" + +func main() { + // Your application logic here. +} +``` + +# Performance +Data measured from Uber's internal load balancer. 
We ran the load balancer with 200% CPU quota (i.e., 2 cores): + +| GOMAXPROCS | RPS | P50 (ms) | P99.9 (ms) | +| ------------------ | --------- | -------- | ---------- | +| 1 | 28,893.18 | 1.46 | 19.70 | +| 2 (equal to quota) | 44,715.07 | 0.84 | 26.38 | +| 3 | 44,212.93 | 0.66 | 30.07 | +| 4 | 41,071.15 | 0.57 | 42.94 | +| 8 | 33,111.69 | 0.43 | 64.32 | +| Default (24) | 22,191.40 | 0.45 | 76.19 | + +When `GOMAXPROCS` is increased above the CPU quota, we see P50 decrease slightly, but see significant increases to P99. We also see that the total RPS handled also decreases. + +When `GOMAXPROCS` is higher than the CPU quota allocated, we also saw significant throttling: + +``` +$ cat /sys/fs/cgroup/cpu,cpuacct/system.slice/[...]/cpu.stat +nr_periods 42227334 +nr_throttled 131923 +throttled_time 88613212216618 +``` + +Once `GOMAXPROCS` was reduced to match the CPU quota, we saw no CPU throttling. + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +automaxprocs to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The automaxprocs maintainers keep +an eye on issues and pull requests, but you can also report any negative +conduct to oss-conduct@uber.com. That email list is a private, safe space; +even the automaxprocs maintainers don't have access, so don't hesitate to hold +us to a high standard. + +
+ +Released under the [MIT License](LICENSE). + +[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg +[doc]: https://godoc.org/go.uber.org/automaxprocs +[ci-img]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/automaxprocs + + diff --git a/vendor/go.uber.org/automaxprocs/automaxprocs.go b/vendor/go.uber.org/automaxprocs/automaxprocs.go new file mode 100644 index 00000000..69946a3e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/automaxprocs.go @@ -0,0 +1,33 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package automaxprocs automatically sets GOMAXPROCS to match the Linux +// container CPU quota, if any. +package automaxprocs // import "go.uber.org/automaxprocs" + +import ( + "log" + + "go.uber.org/automaxprocs/maxprocs" +) + +func init() { + maxprocs.Set(maxprocs.Logger(log.Printf)) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go index 1257d0c9..fe4ecf56 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go @@ -18,6 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +//go:build linux // +build linux package cgroups diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go index e2489b82..e89f5436 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go @@ -18,6 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
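The automaxprocs.go file added above sets GOMAXPROCS from init() as a side effect of a blank import. A small sketch of observing that effect; the printed value depends entirely on the environment and is only indicative:

```go
package main

import (
	"fmt"
	"runtime"

	_ "go.uber.org/automaxprocs" // init() adjusts GOMAXPROCS to the container CPU quota, if any
)

func main() {
	// Outside a quota-limited container this reports the usual default
	// (the number of logical CPUs); inside one it reports the rounded quota.
	fmt.Println("GOMAXPROCS =", runtime.GOMAXPROCS(0))
}
```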
+//go:build linux // +build linux package cgroups @@ -109,8 +110,8 @@ func (cg CGroups) CPUQuota() (float64, bool, error) { } cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam) - if err != nil { - return -1, false, err + if defined := cfsPeriodUs > 0; err != nil || !defined { + return -1, defined, err } return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go new file mode 100644 index 00000000..78556062 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go @@ -0,0 +1,176 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" +) + +const ( + // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period + // parameter. + _cgroupv2CPUMax = "cpu.max" + // _cgroupFSType is the Linux CGroup-V2 file system type used in + // `/proc/$PID/mountinfo`. + _cgroupv2FSType = "cgroup2" + + _cgroupv2MountPoint = "/sys/fs/cgroup" + + _cgroupV2CPUMaxDefaultPeriod = 100000 + _cgroupV2CPUMaxQuotaMax = "max" +) + +const ( + _cgroupv2CPUMaxQuotaIndex = iota + _cgroupv2CPUMaxPeriodIndex +) + +// ErrNotV2 indicates that the system is not using cgroups2. +var ErrNotV2 = errors.New("not using cgroups2") + +// CGroups2 provides access to cgroups data for systems using cgroups2. +type CGroups2 struct { + mountPoint string + groupPath string + cpuMaxFile string +} + +// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process. +// +// This returns ErrNotV2 if the system is not using cgroups2. 
+func NewCGroups2ForCurrentProcess() (*CGroups2, error) { + return newCGroups2From(_procPathMountInfo, _procPathCGroup) +} + +func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) { + isV2, err := isCGroupV2(mountInfoPath) + if err != nil { + return nil, err + } + + if !isV2 { + return nil, ErrNotV2 + } + + subsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + // Find v2 subsystem by looking for the `0` id + var v2subsys *CGroupSubsys + for _, subsys := range subsystems { + if subsys.ID == 0 { + v2subsys = subsys + break + } + } + + if v2subsys == nil { + return nil, ErrNotV2 + } + + return &CGroups2{ + mountPoint: _cgroupv2MountPoint, + groupPath: v2subsys.Name, + cpuMaxFile: _cgroupv2CPUMax, + }, nil +} + +func isCGroupV2(procPathMountInfo string) (bool, error) { + var ( + isV2 bool + newMountPoint = func(mp *MountPoint) error { + isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint) + return nil + } + ) + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return false, err + } + + return isV2, nil +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller. +// It is a result of reading cpu quota and period from cpu.max file. +// It will return `cpu.max / cpu.period`. If cpu.max is set to max, it returns +// (-1, false, nil) +func (cg *CGroups2) CPUQuota() (float64, bool, error) { + cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile)) + if err != nil { + if os.IsNotExist(err) { + return -1, false, nil + } + return -1, false, err + } + defer cpuMaxParams.Close() + + scanner := bufio.NewScanner(cpuMaxParams) + if scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 || len(fields) > 2 { + return -1, false, fmt.Errorf("invalid format") + } + + if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax { + return -1, false, nil + } + + max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex]) + if err != nil { + return -1, false, err + } + + var period int + if len(fields) == 1 { + period = _cgroupV2CPUMaxDefaultPeriod + } else { + period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex]) + if err != nil { + return -1, false, err + } + + if period == 0 { + return -1, false, errors.New("zero value for period is not allowed") + } + } + + return float64(max) / float64(period), true, nil + } + + if err := scanner.Err(); err != nil { + return -1, false, err + } + + return 0, false, io.ErrUnexpectedEOF +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go index bad8d7ae..94ac75a4 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go @@ -18,6 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +//go:build linux // +build linux package cgroups diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go index d6238d5c..f3877f78 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go @@ -18,6 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
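The CPUQuota method above derives the quota from the two-field cpu.max file. A standalone sketch of the same arithmetic with the file handling stripped away; the helper name is invented for the illustration, while the default period mirrors the vendored constant:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// quotaFromCPUMax mirrors the cpu.max arithmetic in CGroups2.CPUQuota:
// "<max> <period>" yields max/period, a bare "<max>" assumes a 100000us
// period, and the literal "max" means no quota is configured.
func quotaFromCPUMax(line string) (float64, bool) {
	fields := strings.Fields(line)
	if len(fields) == 0 || fields[0] == "max" {
		return -1, false
	}
	max, err := strconv.Atoi(fields[0])
	if err != nil {
		return -1, false
	}
	period := 100000 // default period when the second field is absent
	if len(fields) == 2 {
		if p, perr := strconv.Atoi(fields[1]); perr == nil && p > 0 {
			period = p
		}
	}
	return float64(max) / float64(period), true
}

func main() {
	fmt.Println(quotaFromCPUMax("200000 100000")) // 2 true  (two CPUs' worth of quota)
	fmt.Println(quotaFromCPUMax("max 100000"))    // -1 false (unlimited)
}
```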
+//go:build linux // +build linux package cgroups @@ -94,8 +95,12 @@ func NewMountPointFromLine(line string) (*MountPoint, error) { for i, field := range fields[_miFieldIDOptionalFields:] { if field == _mountInfoOptionalFieldsSep { + // End of optional fields. fsTypeStart := _miFieldIDOptionalFields + i + 1 + // Now we know where the optional fields end, split the line again with a + // limit to avoid issues with spaces in super options as present on WSL. + fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf) if len(fields) != fsTypeStart+_miFieldCountSecondHalf { return nil, mountPointFormatInvalidError{line} } diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go index 103e86f3..cddc3eae 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go @@ -18,6 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +//go:build linux // +build linux package cgroups diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go index 37699c31..f9057fd2 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go @@ -18,20 +18,25 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +//go:build linux // +build linux package runtime import ( - "math" + "errors" cg "go.uber.org/automaxprocs/internal/cgroups" ) // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process -// to a valid GOMAXPROCS value. -func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) { - cgroups, err := cg.NewCGroupsForCurrentProcess() +// to a valid GOMAXPROCS value. The quota is converted from float to int using round. +// If round == nil, DefaultRoundFunc is used. +func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) { + if round == nil { + round = DefaultRoundFunc + } + cgroups, err := _newQueryer() if err != nil { return -1, CPUQuotaUndefined, err } @@ -41,9 +46,30 @@ func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) { return -1, CPUQuotaUndefined, err } - maxProcs := int(math.Floor(quota)) + maxProcs := round(quota) if minValue > 0 && maxProcs < minValue { return minValue, CPUQuotaMinUsed, nil } return maxProcs, CPUQuotaUsed, nil } + +type queryer interface { + CPUQuota() (float64, bool, error) +} + +var ( + _newCgroups2 = cg.NewCGroups2ForCurrentProcess + _newCgroups = cg.NewCGroupsForCurrentProcess + _newQueryer = newQueryer +) + +func newQueryer() (queryer, error) { + cgroups, err := _newCgroups2() + if err == nil { + return cgroups, nil + } + if errors.Is(err, cg.ErrNotV2) { + return _newCgroups() + } + return nil, err +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go index 5915a2ef..e7470150 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go @@ -18,6 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
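The new newQueryer helper in cpu_quota_linux.go prefers the cgroups v2 reader and falls back to the v1 reader only when the probe reports ErrNotV2. A reduced sketch of that selection logic with stand-in types; all names below are invented for the illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotV2 stands in for cgroups.ErrNotV2 in this illustration.
var errNotV2 = errors.New("not using cgroups2")

type quotaReader interface {
	CPUQuota() (float64, bool, error)
}

type v1Reader struct{}

func (v1Reader) CPUQuota() (float64, bool, error) { return 1.5, true, nil }

// pickReader mimics the vendored newQueryer: prefer cgroups v2 and fall back
// to v1 only when the v2 probe reports the host is not on cgroups2; any other
// error is returned as-is.
func pickReader(newV2, newV1 func() (quotaReader, error)) (quotaReader, error) {
	r, err := newV2()
	if err == nil {
		return r, nil
	}
	if errors.Is(err, errNotV2) {
		return newV1()
	}
	return nil, err
}

func main() {
	r, err := pickReader(
		func() (quotaReader, error) { return nil, errNotV2 }, // v2 unavailable
		func() (quotaReader, error) { return v1Reader{}, nil },
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(r.CPUQuota()) // 1.5 true <nil>
}
```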
+//go:build !linux // +build !linux package runtime @@ -25,6 +26,6 @@ package runtime // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process // to a valid GOMAXPROCS value. This is Linux-specific and not supported in the // current OS. -func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) { +func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) { return -1, CPUQuotaUndefined, nil } diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go index 3a751564..f8a2834a 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go @@ -20,6 +20,8 @@ package runtime +import "math" + // CPUQuotaStatus presents the status of how CPU quota is used type CPUQuotaStatus int @@ -28,6 +30,11 @@ const ( CPUQuotaUndefined CPUQuotaStatus = iota // CPUQuotaUsed is returned when a valid CPU quota can be used CPUQuotaUsed - // CPUQuotaMinUsed is return when CPU quota is smaller than the min value + // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value CPUQuotaMinUsed ) + +// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor). +func DefaultRoundFunc(v float64) int { + return int(math.Floor(v)) +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go index ec032438..e561fe60 100644 --- a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go +++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go @@ -37,9 +37,10 @@ func currentMaxProcs() int { } type config struct { - printf func(string, ...interface{}) - procs func(int) (int, iruntime.CPUQuotaStatus, error) - minGOMAXPROCS int + printf func(string, ...interface{}) + procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error) + minGOMAXPROCS int + roundQuotaFunc func(v float64) int } func (c *config) log(fmt string, args ...interface{}) { @@ -71,6 +72,13 @@ func Min(n int) Option { }) } +// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int. +func RoundQuotaFunc(rf func(v float64) int) Option { + return optionFunc(func(cfg *config) { + cfg.roundQuotaFunc = rf + }) +} + type optionFunc func(*config) func (of optionFunc) apply(cfg *config) { of(cfg) } @@ -82,8 +90,9 @@ func (of optionFunc) apply(cfg *config) { of(cfg) } // configured CPU quota. func Set(opts ...Option) (func(), error) { cfg := &config{ - procs: iruntime.CPUQuotaToGOMAXPROCS, - minGOMAXPROCS: 1, + procs: iruntime.CPUQuotaToGOMAXPROCS, + roundQuotaFunc: iruntime.DefaultRoundFunc, + minGOMAXPROCS: 1, } for _, o := range opts { o.apply(cfg) @@ -96,13 +105,13 @@ func Set(opts ...Option) (func(), error) { // Honor the GOMAXPROCS environment variable if present. Otherwise, amend // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is // Linux, and guarantee a minimum value of 1. The minimum guaranteed value - // can be overriden using `maxprocs.Min()`. + // can be overridden using `maxprocs.Min()`. 
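For callers, the new rounding hook surfaces through the public maxprocs.Set option added in this diff. The sketch below is a usage example under the v1.6.0 API shown here; it rounds fractional quotas up instead of the default floor, and is a no-op on non-Linux hosts.

package main

import (
	"fmt"
	"math"
	"runtime"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// With a 2.5-CPU quota this yields GOMAXPROCS=3 rather than the default 2.
	undo, err := maxprocs.Set(
		maxprocs.Logger(func(format string, args ...interface{}) {
			fmt.Printf(format+"\n", args...)
		}),
		maxprocs.RoundQuotaFunc(func(v float64) int { return int(math.Ceil(v)) }),
	)
	if err != nil {
		fmt.Println("maxprocs:", err)
	}
	defer undo()

	fmt.Println("GOMAXPROCS =", runtime.GOMAXPROCS(0))
}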
if max, exists := os.LookupEnv(_maxProcsKey); exists { cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) return undoNoop, nil } - maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS) + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) if err != nil { return undoNoop, err } diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go index d9ad8f4c..cc7fc5ae 100644 --- a/vendor/go.uber.org/automaxprocs/maxprocs/version.go +++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go @@ -21,4 +21,4 @@ package maxprocs // Version is the current package version. -const Version = "1.4.0" +const Version = "1.6.0" diff --git a/vendor/go.uber.org/mock/mockgen/mockgen.go b/vendor/go.uber.org/mock/mockgen/mockgen.go index b5365de5..79cce84b 100644 --- a/vendor/go.uber.org/mock/mockgen/mockgen.go +++ b/vendor/go.uber.org/mock/mockgen/mockgen.go @@ -393,8 +393,8 @@ func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPac // try base0, base1, ... pkgName := base - if _, ok := definedImports[base]; ok { - pkgName = definedImports[base] + if _, ok := definedImports[pth]; ok { + pkgName = definedImports[pth] } i := 0 @@ -758,6 +758,17 @@ func (g *generator) GenerateMockReturnCallMethod(intf *model.Interface, m *model return nil } +// nameExistsAsPackage returns true if the name exists as a package name. +// This is used to avoid name collisions when generating mock method arguments. +func (g *generator) nameExistsAsPackage(name string) bool { + for _, symbolName := range g.packageMap { + if symbolName == name { + return true + } + } + return false +} + func (g *generator) getArgNames(m *model.Method, in bool) []string { var params []*model.Parameter if in { @@ -766,16 +777,19 @@ func (g *generator) getArgNames(m *model.Method, in bool) []string { params = m.Out } argNames := make([]string, len(params)) + for i, p := range params { name := p.Name - if name == "" || name == "_" { + + if name == "" || name == "_" || g.nameExistsAsPackage(name) { name = fmt.Sprintf("arg%d", i) } argNames[i] = name } if m.Variadic != nil && in { name := m.Variadic.Name - if name == "" { + + if name == "" || g.nameExistsAsPackage(name) { name = fmt.Sprintf("arg%d", len(params)) } argNames = append(argNames, name) diff --git a/vendor/go.uber.org/mock/mockgen/package_mode.go b/vendor/go.uber.org/mock/mockgen/package_mode.go index abc9c7a4..acbe487f 100644 --- a/vendor/go.uber.org/mock/mockgen/package_mode.go +++ b/vendor/go.uber.org/mock/mockgen/package_mode.go @@ -4,6 +4,7 @@ import ( "errors" "flag" "fmt" + "go/ast" "go/types" "strings" @@ -17,6 +18,20 @@ var ( type packageModeParser struct { pkgName string + + // Mapping from underlying types to aliases used within the package source. + // + // We prefer to use aliases used in the source rather than underlying type names + // as those may be unexported or internal. + // TODO(joaks): Once mock is Go1.23+ only, we can remove this + // as the casing for types.Alias will automatically handle this + // in all cases. 
+ aliasReplacements map[types.Type]aliasReplacement +} + +type aliasReplacement struct { + name string + pkg string } func (p *packageModeParser) parsePackage(packageName string, ifaces []string) (*model.Package, error) { @@ -27,6 +42,8 @@ func (p *packageModeParser) parsePackage(packageName string, ifaces []string) (* return nil, fmt.Errorf("load package: %w", err) } + p.buildAliasReplacements(pkg) + interfaces, err := p.extractInterfacesFromPackage(pkg, ifaces) if err != nil { return nil, fmt.Errorf("extract interfaces from package: %w", err) @@ -39,6 +56,90 @@ func (p *packageModeParser) parsePackage(packageName string, ifaces []string) (* }, nil } +// buildAliasReplacements finds and records any references to aliases +// within the given package's source. +// These aliases will be preferred when parsing types +// over the underlying name counterparts, as those may be unexported / internal. +// +// If a type has more than one alias within the source package, +// the latest one to be inspected will be the one used for mapping. +// This is fine, since all aliases and their underlying types are interchangeable +// from a type-checking standpoint. +func (p *packageModeParser) buildAliasReplacements(pkg *packages.Package) { + p.aliasReplacements = make(map[types.Type]aliasReplacement) + + // checkIdent checks if the given identifier exists + // in the given package as an alias, and adds it to + // the alias replacements map if so. + checkIdent := func(pkg *types.Package, ident string) bool { + scope := pkg.Scope() + if scope == nil { + return true + } + obj := scope.Lookup(ident) + if obj == nil { + return true + } + objTypeName, ok := obj.(*types.TypeName) + if !ok { + return true + } + if !objTypeName.IsAlias() { + return true + } + typ := objTypeName.Type() + if typ == nil { + return true + } + p.aliasReplacements[typ] = aliasReplacement{ + name: objTypeName.Name(), + pkg: pkg.Path(), + } + return false + + } + + for _, f := range pkg.Syntax { + fileScope, ok := pkg.TypesInfo.Scopes[f] + if !ok { + continue + } + ast.Inspect(f, func(node ast.Node) bool { + + // Simple identifiers: check if it is an alias + // from the source package. + if ident, ok := node.(*ast.Ident); ok { + return checkIdent(pkg.Types, ident.String()) + } + + // Selector expressions: check if it is an alias + // from the package represented by the qualifier. 
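The alias map built above exists so that generated mocks refer to exported aliases rather than the possibly unexported underlying types they point at. A hedged illustration of the situation it handles, with made-up package and type names:

// Package api is a hypothetical example: Message is exported only as an
// alias of an unexported named type.
package api

type message struct{ Text string }

// Message is the exported alias that consumers (and mocked interfaces) use.
type Message = message

// Sender is the kind of interface one might feed to mockgen. Without the
// alias map, the generated mock would spell the parameter type as the
// unexported api.message and fail to compile; with it, mockgen emits
// api.Message instead.
type Sender interface {
	Send(m Message) error
}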
+ selExpr, ok := node.(*ast.SelectorExpr) + if !ok { + return true + } + + x, sel := selExpr.X, selExpr.Sel + xident, ok := x.(*ast.Ident) + if !ok { + return true + } + + xObj := fileScope.Lookup(xident.String()) + pkgName, ok := xObj.(*types.PkgName) + if !ok { + return true + } + + xPkg := pkgName.Imported() + if xPkg == nil { + return true + } + return checkIdent(xPkg, sel.String()) + }) + } +} + func (p *packageModeParser) loadPackage(packageName string) (*packages.Package, error) { var buildFlagsSet []string if *buildFlags != "" { @@ -46,7 +147,7 @@ func (p *packageModeParser) loadPackage(packageName string) (*packages.Package, } cfg := &packages.Config{ - Mode: packages.NeedDeps | packages.NeedImports | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedEmbedFiles, + Mode: packages.NeedDeps | packages.NeedImports | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedEmbedFiles | packages.LoadSyntax, BuildFlags: buildFlagsSet, } pkgs, err := packages.Load(cfg, packageName) @@ -193,17 +294,26 @@ func (p *packageModeParser) parseType(t types.Type) (model.Type, error) { return sig, nil case *types.Named, *types.Alias: object := t.(interface{ Obj() *types.TypeName }) + name := object.Obj().Name() var pkg string if object.Obj().Pkg() != nil { pkg = object.Obj().Pkg().Path() } + // If there was an alias to this type used somewhere in the source, + // use that alias instead of the underlying type, + // since the underlying type might be unexported. + if alias, ok := p.aliasReplacements[t]; ok { + name = alias.name + pkg = alias.pkg + } + // TypeArgs method not available for aliases in go1.22 genericType, ok := t.(interface{ TypeArgs() *types.TypeList }) if !ok || genericType.TypeArgs() == nil { return &model.NamedType{ Package: pkg, - Type: object.Obj().Name(), + Type: name, }, nil } @@ -220,7 +330,7 @@ func (p *packageModeParser) parseType(t types.Type) (model.Type, error) { return &model.NamedType{ Package: pkg, - Type: object.Obj().Name(), + Type: name, TypeParams: typeParams, }, nil case *types.Interface: diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index bd896bdc..8d99551f 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego +//go:build (!amd64 && !loong64 && !ppc64le && !ppc64 && !s390x) || !gc || purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go similarity index 94% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go index 164cd47d..315b84ac 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build gc && !purego +//go:build gc && !purego && (amd64 || loong64 || ppc64 || ppc64le) package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s new file mode 100644 index 00000000..bc8361da --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +// func update(state *macState, msg []byte) +TEXT ·update(SB), $0-32 + MOVV state+0(FP), R4 + MOVV msg_base+8(FP), R5 + MOVV msg_len+16(FP), R6 + + MOVV $0x10, R7 + + MOVV (R4), R8 // h0 + MOVV 8(R4), R9 // h1 + MOVV 16(R4), R10 // h2 + MOVV 24(R4), R11 // r0 + MOVV 32(R4), R12 // r1 + + BLT R6, R7, bytes_between_0_and_15 + +loop: + MOVV (R5), R14 // msg[0:8] + MOVV 8(R5), R16 // msg[8:16] + ADDV R14, R8, R8 // h0 (x1 + y1 = z1', if z1' < x1 then z1' overflow) + ADDV R16, R9, R27 + SGTU R14, R8, R24 // h0.carry + SGTU R9, R27, R28 + ADDV R27, R24, R9 // h1 + SGTU R27, R9, R24 + OR R24, R28, R24 // h1.carry + ADDV $0x01, R24, R24 + ADDV R10, R24, R10 // h2 + + ADDV $16, R5, R5 // msg = msg[16:] + +multiply: + MULV R8, R11, R14 // h0r0.lo + MULHVU R8, R11, R15 // h0r0.hi + MULV R9, R11, R13 // h1r0.lo + MULHVU R9, R11, R16 // h1r0.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MULV R10, R11, R25 + ADDV R16, R25, R25 + MULV R8, R12, R13 // h0r1.lo + MULHVU R8, R12, R16 // h0r1.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MOVV R16, R8 + MULV R10, R12, R26 // h2r1 + MULV R9, R12, R13 // h1r1.lo + MULHVU R9, R12, R16 // h1r1.hi + ADDV R13, R25, R25 + ADDV R16, R26, R27 + SGTU R13, R25, R24 + ADDV R27, R24, R26 + ADDV R8, R25, R25 + SGTU R8, R25, R24 + ADDV R24, R26, R26 + AND $3, R25, R10 + AND $-4, R25, R17 + ADDV R17, R14, R8 + ADDV R26, R15, R27 + SGTU R17, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + SLLV $62, R26, R27 + SRLV $2, R25, R28 + SRLV $2, R26, R26 + OR R27, R28, R25 + ADDV R25, R8, R8 + ADDV R26, R9, R27 + SGTU R25, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + + SUBV $16, R6, R6 + BGE R6, R7, loop + +bytes_between_0_and_15: + BEQ R6, R0, done + MOVV $1, R14 + XOR R15, R15 + ADDV R6, R5, R5 + +flush_buffer: + MOVBU -1(R5), R25 + SRLV $56, R14, R24 + SLLV $8, R15, R28 + SLLV $8, R14, R14 + OR R24, R28, R15 + XOR R25, R14, R14 + SUBV $1, R6, R6 + SUBV $1, R5, R5 + BNE R6, R0, flush_buffer + + ADDV R14, R8, R8 + SGTU R14, R8, R24 + ADDV R15, R9, R27 + SGTU R15, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R10, R24, R10 + + MOVV $16, R6 + JMP multiply + +done: + MOVV R8, (R4) + MOVV R9, 8(R4) + MOVV R10, 16(R4) + RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go deleted file mode 100644 index 1a1679aa..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gc && !purego && (ppc64 || ppc64le) - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. -type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 28cd99c7..00000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 56cdc7c2..b6bf546b 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -5,7 +5,6 @@ package ssh import ( - "crypto/rand" "errors" "fmt" "io" @@ -25,6 +24,11 @@ const debugHandshake = false // quickly. const chanSize = 16 +// maxPendingPackets sets the maximum number of packets to queue while waiting +// for KEX to complete. This limits the total pending data to maxPendingPackets +// * maxPacket bytes, which is ~16.8MB. +const maxPendingPackets = 64 + // keyingTransport is a packet based transport that supports key // changes. It need not be thread-safe. It should pass through // msgNewKeys in both directions. @@ -73,13 +77,22 @@ type handshakeTransport struct { incoming chan []byte readError error - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. + mu sync.Mutex + // Condition for the above mutex. It is used to notify a completed key + // exchange or a write failure. Writes can wait for this condition while a + // key exchange is in progress. + writeCond *sync.Cond + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + // Used to queue writes when a key exchange is in progress. The length is + // limited by pendingPacketsSize. Once full, writes will block until the key + // exchange is completed or an error occurs. If not empty, it is emptied + // all at once when the key exchange is completed in kexLoop. + pendingPackets [][]byte writePacketsLeft uint32 writeBytesLeft int64 + userAuthComplete bool // whether the user authentication phase is complete // If the read loop wants to schedule a kex, it pings this // channel, and the write loop will send out a kex @@ -133,6 +146,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, config: config, } + t.writeCond = sync.NewCond(&t.mu) t.resetReadThresholds() t.resetWriteThresholds() @@ -259,6 +273,7 @@ func (t *handshakeTransport) recordWriteError(err error) { defer t.mu.Unlock() if t.writeError == nil && err != nil { t.writeError = err + t.writeCond.Broadcast() } } @@ -362,6 +377,8 @@ write: } } t.pendingPackets = t.pendingPackets[:0] + // Unblock writePacket if waiting for KEX. + t.writeCond.Broadcast() t.mu.Unlock() } @@ -483,7 +500,7 @@ func (t *handshakeTransport) sendKexInit() error { CompressionClientServer: supportedCompressions, CompressionServerClient: supportedCompressions, } - io.ReadFull(rand.Reader, msg.Cookie[:]) + io.ReadFull(t.config.Rand, msg.Cookie[:]) // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, // and possibly to add the ext-info extension algorithm. 
Since the slice may be the @@ -552,26 +569,44 @@ func (t *handshakeTransport) sendKexInit() error { return nil } +var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase") + func (t *handshakeTransport) writePacket(p []byte) error { + t.mu.Lock() + defer t.mu.Unlock() + switch p[0] { case msgKexInit: return errors.New("ssh: only handshakeTransport can send kexInit") case msgNewKeys: return errors.New("ssh: only handshakeTransport can send newKeys") + case msgUserAuthBanner: + if t.userAuthComplete { + return errSendBannerPhase + } + case msgUserAuthSuccess: + t.userAuthComplete = true } - t.mu.Lock() - defer t.mu.Unlock() if t.writeError != nil { return t.writeError } if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil + if len(t.pendingPackets) < maxPendingPackets { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + for t.sentInitMsg != nil { + // Block and wait for KEX to complete or an error. + t.writeCond.Wait() + if t.writeError != nil { + return t.writeError + } + } } if t.writeBytesLeft > 0 { @@ -588,6 +623,7 @@ func (t *handshakeTransport) writePacket(p []byte) error { if err := t.pushPacket(p); err != nil { t.writeError = err + t.writeCond.Broadcast() } return nil diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index b55f8605..118427bc 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) { return new(userAuthSuccessMsg), nil case msgUserAuthFailure: msg = new(userAuthFailureMsg) + case msgUserAuthBanner: + msg = new(userAuthBannerMsg) case msgUserAuthPubKeyOk: msg = new(userAuthPubKeyOkMsg) case msgGlobalRequest: diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 5b5ccd96..1839ddc6 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -59,6 +59,27 @@ type GSSAPIWithMICConfig struct { Server GSSAPIServer } +// SendAuthBanner implements [ServerPreAuthConn]. +func (s *connection) SendAuthBanner(msg string) error { + return s.transport.writePacket(Marshal(&userAuthBannerMsg{ + Message: msg, + })) +} + +func (*connection) unexportedMethodForFutureProofing() {} + +// ServerPreAuthConn is the interface available on an incoming server +// connection before authentication has completed. +type ServerPreAuthConn interface { + unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB + + ConnMetadata + + // SendAuthBanner sends a banner message to the client. + // It returns an error once the authentication phase has ended. + SendAuthBanner(string) error +} + // ServerConfig holds server specific configuration data. type ServerConfig struct { // Config contains configuration shared between client and server. @@ -118,6 +139,12 @@ type ServerConfig struct { // attempts. AuthLogCallback func(conn ConnMetadata, method string, err error) + // PreAuthConnCallback, if non-nil, is called upon receiving a new connection + // before any authentication has started. The provided ServerPreAuthConn + // can be used at any time before authentication is complete, including + // after this callback has returned. 
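The new ServerPreAuthConn hook lets a server send banner messages at any point before authentication completes, instead of only from BannerCallback. A usage sketch against the API added in this diff; host key creation and the rest of the server setup are assumed to exist elsewhere, and NoClientAuth is for illustration only.

package sshbanner

import (
	"log"

	"golang.org/x/crypto/ssh"
)

// newServerConfig wires up the pre-auth hook described above.
func newServerConfig(hostKey ssh.Signer) *ssh.ServerConfig {
	cfg := &ssh.ServerConfig{
		// Invoked once per incoming connection, before any auth request.
		// The ServerPreAuthConn remains usable until authentication ends,
		// after which SendAuthBanner returns an error.
		PreAuthConnCallback: func(conn ssh.ServerPreAuthConn) {
			if err := conn.SendAuthBanner("authorized use only\r\n"); err != nil {
				log.Printf("banner to %s failed: %v", conn.RemoteAddr(), err)
			}
		},
		NoClientAuth: true, // illustrative only
	}
	cfg.AddHostKey(hostKey)
	return cfg
}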
+ PreAuthConnCallback func(ServerPreAuthConn) + // ServerVersion is the version identification string to announce in // the public handshake. // If empty, a reasonable default is used. @@ -488,6 +515,10 @@ func (b *BannerError) Error() string { } func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + if config.PreAuthConnCallback != nil { + config.PreAuthConnCallback(s) + } + sessionID := s.transport.getSessionID() var cache pubKeyCache var perms *Permissions @@ -495,7 +526,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err authFailures := 0 noneAuthCount := 0 var authErrs []error - var displayedBanner bool + var calledBannerCallback bool partialSuccessReturned := false // Set the initial authentication callbacks from the config. They can be // changed if a PartialSuccessError is returned. @@ -542,14 +573,10 @@ userAuthLoop: s.user = userAuthReq.User - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + if !calledBannerCallback && config.BannerCallback != nil { + calledBannerCallback = true + if msg := config.BannerCallback(s); msg != "" { + if err := s.SendAuthBanner(msg); err != nil { return nil, err } } @@ -762,10 +789,7 @@ userAuthLoop: var bannerErr *BannerError if errors.As(authErr, &bannerErr) { if bannerErr.Message != "" { - bannerMsg := &userAuthBannerMsg{ - Message: bannerErr.Message, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + if err := s.SendAuthBanner(bannerErr.Message); err != nil { return nil, err } } diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go index ef5059a1..93d844f0 100644 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel return nil, err } go DiscardRequests(in) - return ch, err + return ch, nil } type tcpChan struct { diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/exp/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/rand/exp.go b/vendor/golang.org/x/exp/rand/exp.go deleted file mode 100644 index 08386727..00000000 --- a/vendor/golang.org/x/exp/rand/exp.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rand - -import ( - "math" -) - -/* - * Exponential distribution - * - * See "The Ziggurat Method for Generating Random Variables" - * (Marsaglia & Tsang, 2000) - * http://www.jstatsoft.org/v05/i08/paper [pdf] - */ - -const ( - re = 7.69711747013104972 -) - -// ExpFloat64 returns an exponentially distributed float64 in the range -// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter -// (lambda) is 1 and whose mean is 1/lambda (1). 
-// To produce a distribution with a different rate parameter, -// callers can adjust the output using: -// -// sample = ExpFloat64() / desiredRateParameter -func (r *Rand) ExpFloat64() float64 { - for { - j := r.Uint32() - i := j & 0xFF - x := float64(j) * float64(we[i]) - if j < ke[i] { - return x - } - if i == 0 { - return re - math.Log(r.Float64()) - } - if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) { - return x - } - } -} - -var ke = [256]uint32{ - 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990, - 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8, - 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78, - 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651, - 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca, - 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8, - 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea, - 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba, - 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed, - 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662, - 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3, - 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace, - 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6, - 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7, - 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415, - 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4, - 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36, - 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46, - 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac, - 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245, - 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52, - 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06, - 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0, - 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9, - 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76, - 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516, - 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289, - 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed, - 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb, - 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e, - 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a, - 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1, - 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b, - 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621, - 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d, - 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3, - 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73, - 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88, - 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a, - 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb, - 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176, - 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be, - 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192, - 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed, - 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936, - 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b, - 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4, - 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1, - 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482, - 0xfa839276, 0xfa263b32, 0xf9b72d1c, 
0xf930a1a2, 0xf889f023, - 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d, - 0xe6da6ecf, -} -var we = [256]float32{ - 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11, - 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11, - 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11, - 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11, - 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10, - 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10, - 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10, - 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10, - 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10, - 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10, - 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10, - 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10, - 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10, - 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10, - 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10, - 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10, - 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10, - 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10, - 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10, - 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10, - 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10, - 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10, - 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10, - 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10, - 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10, - 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10, - 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10, - 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10, - 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10, - 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10, - 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10, - 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10, - 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10, - 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10, - 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10, - 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10, - 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10, - 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10, - 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10, - 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10, - 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10, - 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10, - 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10, - 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10, - 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10, - 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10, - 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10, - 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10, - 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10, - 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10, - 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10, - 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10, - 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10, - 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10, - 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10, - 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10, - 8.401528e-10, 8.496445e-10, 
8.594247e-10, 8.6951274e-10, - 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10, - 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10, - 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09, - 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09, - 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09, - 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09, - 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09, -} -var fe = [256]float32{ - 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933, - 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686, - 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665, - 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967, - 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896, - 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092, - 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386, - 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495, - 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752, - 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325, - 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955, - 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694, - 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218, - 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763, - 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044, - 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796, - 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408, - 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928, - 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393, - 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625, - 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107, - 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878, - 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438, - 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682, - 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852, - 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479, - 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354, - 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494, - 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119, - 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624, - 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574, - 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672, - 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763, - 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816, - 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919, - 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274, - 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195, - 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106, - 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434, - 0.062193416, 0.060783047, 0.059384305, 0.057997175, - 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236, - 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623, - 0.043502413, 0.042254124, 0.041017443, 0.039792392, - 0.038578995, 0.037377283, 0.036187284, 0.035009038, - 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566, - 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421, - 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867, - 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392, - 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414, - 0.008780315, 0.007963077, 0.0071633533, 0.006381906, - 0.0056196423, 
0.0048776558, 0.004157295, 0.0034602648, - 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693, - 0.00045413437, -} diff --git a/vendor/golang.org/x/exp/rand/normal.go b/vendor/golang.org/x/exp/rand/normal.go deleted file mode 100644 index b66da3a8..00000000 --- a/vendor/golang.org/x/exp/rand/normal.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rand - -import ( - "math" -) - -/* - * Normal distribution - * - * See "The Ziggurat Method for Generating Random Variables" - * (Marsaglia & Tsang, 2000) - * http://www.jstatsoft.org/v05/i08/paper [pdf] - */ - -const ( - rn = 3.442619855899 -) - -func absInt32(i int32) uint32 { - if i < 0 { - return uint32(-i) - } - return uint32(i) -} - -// NormFloat64 returns a normally distributed float64 in the range -// [-math.MaxFloat64, +math.MaxFloat64] with -// standard normal distribution (mean = 0, stddev = 1). -// To produce a different normal distribution, callers can -// adjust the output using: -// -// sample = NormFloat64() * desiredStdDev + desiredMean -func (r *Rand) NormFloat64() float64 { - for { - j := int32(r.Uint32()) // Possibly negative - i := j & 0x7F - x := float64(j) * float64(wn[i]) - if absInt32(j) < kn[i] { - // This case should be hit better than 99% of the time. - return x - } - - if i == 0 { - // This extra work is only required for the base strip. - for { - x = -math.Log(r.Float64()) * (1.0 / rn) - y := -math.Log(r.Float64()) - if y+y >= x*x { - break - } - } - if j > 0 { - return rn + x - } - return -rn - x - } - if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) { - return x - } - } -} - -var kn = [128]uint32{ - 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2, - 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d, - 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7, - 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883, - 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30, - 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa, - 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d, - 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18, - 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924, - 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a, - 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4, - 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62, - 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e, - 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473, - 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd, - 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568, - 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08, - 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc, - 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94, - 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb, - 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075, - 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba, - 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded, - 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72, - 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a, - 0x7ba90bdc, 0x7a722176, 0x77d664e5, -} -var wn = [128]float32{ - 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10, - 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10, - 2.9073963e-10, 3.042997e-10, 
3.1699796e-10, 3.289802e-10, - 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10, - 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10, - 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10, - 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10, - 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10, - 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10, - 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10, - 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10, - 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10, - 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10, - 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10, - 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10, - 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10, - 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10, - 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10, - 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10, - 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10, - 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10, - 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10, - 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10, - 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10, - 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10, - 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09, - 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09, - 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09, - 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09, - 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09, - 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09, - 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09, -} -var fn = [128]float32{ - 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303, - 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177, - 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569, - 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277, - 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434, - 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903, - 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055, - 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766, - 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872, - 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642, - 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446, - 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685, - 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484, - 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807, - 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239, - 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877, - 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499, - 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037, - 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265, - 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602, - 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467, - 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058, - 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863, - 0.040742867, 0.03688439, 0.033087887, 0.029356318, - 0.025693292, 0.022103304, 0.018592102, 0.015167298, - 0.011839478, 0.008624485, 0.005548995, 0.0026696292, -} diff --git a/vendor/golang.org/x/exp/rand/rand.go b/vendor/golang.org/x/exp/rand/rand.go deleted file mode 100644 index ee6161bc..00000000 --- a/vendor/golang.org/x/exp/rand/rand.go +++ /dev/null @@ -1,372 +0,0 @@ 
-// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rand implements pseudo-random number generators. -// -// Random numbers are generated by a Source. Top-level functions, such as -// Float64 and Int, use a default shared Source that produces a deterministic -// sequence of values each time a program is run. Use the Seed function to -// initialize the default Source if different behavior is required for each run. -// The default Source, a LockedSource, is safe for concurrent use by multiple -// goroutines, but Sources created by NewSource are not. However, Sources are small -// and it is reasonable to have a separate Source for each goroutine, seeded -// differently, to avoid locking. -// -// For random numbers suitable for security-sensitive work, see the crypto/rand -// package. -package rand - -import "sync" - -// A Source represents a source of uniformly-distributed -// pseudo-random int64 values in the range [0, 1<<64). -type Source interface { - Uint64() uint64 - Seed(seed uint64) -} - -// NewSource returns a new pseudo-random Source seeded with the given value. -func NewSource(seed uint64) Source { - var rng PCGSource - rng.Seed(seed) - return &rng -} - -// A Rand is a source of random numbers. -type Rand struct { - src Source - - // readVal contains remainder of 64-bit integer used for bytes - // generation during most recent Read call. - // It is saved so next Read call can start where the previous - // one finished. - readVal uint64 - // readPos indicates the number of low-order bytes of readVal - // that are still valid. - readPos int8 -} - -// New returns a new Rand that uses random values from src -// to generate other random values. -func New(src Source) *Rand { - return &Rand{src: src} -} - -// Seed uses the provided seed value to initialize the generator to a deterministic state. -// Seed should not be called concurrently with any other Rand method. -func (r *Rand) Seed(seed uint64) { - if lk, ok := r.src.(*LockedSource); ok { - lk.seedPos(seed, &r.readPos) - return - } - - r.src.Seed(seed) - r.readPos = 0 -} - -// Uint64 returns a pseudo-random 64-bit integer as a uint64. -func (r *Rand) Uint64() uint64 { return r.src.Uint64() } - -// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. -func (r *Rand) Int63() int64 { return int64(r.src.Uint64() &^ (1 << 63)) } - -// Uint32 returns a pseudo-random 32-bit value as a uint32. -func (r *Rand) Uint32() uint32 { return uint32(r.Uint64() >> 32) } - -// Int31 returns a non-negative pseudo-random 31-bit integer as an int32. -func (r *Rand) Int31() int32 { return int32(r.Uint64() >> 33) } - -// Int returns a non-negative pseudo-random int. -func (r *Rand) Int() int { - u := uint(r.Uint64()) - return int(u << 1 >> 1) // clear sign bit. -} - -const maxUint64 = (1 << 64) - 1 - -// Uint64n returns, as a uint64, a pseudo-random number in [0,n). -// It is guaranteed more uniform than taking a Source value mod n -// for any n that is not a power of 2. -func (r *Rand) Uint64n(n uint64) uint64 { - if n&(n-1) == 0 { // n is power of two, can mask - if n == 0 { - panic("invalid argument to Uint64n") - } - return r.Uint64() & (n - 1) - } - // If n does not divide v, to avoid bias we must not use - // a v that is within maxUint64%n of the top of the range. - v := r.Uint64() - if v > maxUint64-n { // Fast check. 
- ceiling := maxUint64 - maxUint64%n - for v >= ceiling { - v = r.Uint64() - } - } - - return v % n -} - -// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n). -// It panics if n <= 0. -func (r *Rand) Int63n(n int64) int64 { - if n <= 0 { - panic("invalid argument to Int63n") - } - return int64(r.Uint64n(uint64(n))) -} - -// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n). -// It panics if n <= 0. -func (r *Rand) Int31n(n int32) int32 { - if n <= 0 { - panic("invalid argument to Int31n") - } - // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines. - return int32(r.Uint64n(uint64(n))) -} - -// Intn returns, as an int, a non-negative pseudo-random number in [0,n). -// It panics if n <= 0. -func (r *Rand) Intn(n int) int { - if n <= 0 { - panic("invalid argument to Intn") - } - // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines. - return int(r.Uint64n(uint64(n))) -} - -// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0). -func (r *Rand) Float64() float64 { - // There is one bug in the value stream: r.Int63() may be so close - // to 1<<63 that the division rounds up to 1.0, and we've guaranteed - // that the result is always less than 1.0. - // - // We tried to fix this by mapping 1.0 back to 0.0, but since float64 - // values near 0 are much denser than near 1, mapping 1 to 0 caused - // a theoretically significant overshoot in the probability of returning 0. - // Instead of that, if we round up to 1, just try again. - // Getting 1 only happens 1/2⁵³ of the time, so most clients - // will not observe it anyway. -again: - f := float64(r.Uint64n(1<<53)) / (1 << 53) - if f == 1.0 { - goto again // resample; this branch is taken O(never) - } - return f -} - -// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0). -func (r *Rand) Float32() float32 { - // We do not want to return 1.0. - // This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64). -again: - f := float32(r.Float64()) - if f == 1 { - goto again // resample; this branch is taken O(very rarely) - } - return f -} - -// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n). -func (r *Rand) Perm(n int) []int { - m := make([]int, n) - // In the following loop, the iteration when i=0 always swaps m[0] with m[0]. - // A change to remove this useless iteration is to assign 1 to i in the init - // statement. But Perm also effects r. Making this change will affect - // the final state of r. So this change can't be made for compatibility - // reasons for Go 1. - for i := 0; i < n; i++ { - j := r.Intn(i + 1) - m[i] = m[j] - m[j] = i - } - return m -} - -// Shuffle pseudo-randomizes the order of elements. -// n is the number of elements. Shuffle panics if n < 0. -// swap swaps the elements with indexes i and j. -func (r *Rand) Shuffle(n int, swap func(i, j int)) { - if n < 0 { - panic("invalid argument to Shuffle") - } - - // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle - // Shuffle really ought not be called with n that doesn't fit in 32 bits. - // Not only will it take a very long time, but with 2³¹! possible permutations, - // there's no way that any PRNG can have a big enough internal state to - // generate even a minuscule percentage of the possible permutations. - // Nevertheless, the right API signature accepts an int n, so handle it as best we can. 
- i := n - 1 - for ; i > 1<<31-1-1; i-- { - j := int(r.Int63n(int64(i + 1))) - swap(i, j) - } - for ; i > 0; i-- { - j := int(r.Int31n(int32(i + 1))) - swap(i, j) - } -} - -// Read generates len(p) random bytes and writes them into p. It -// always returns len(p) and a nil error. -// Read should not be called concurrently with any other Rand method unless -// the underlying source is a LockedSource. -func (r *Rand) Read(p []byte) (n int, err error) { - if lk, ok := r.src.(*LockedSource); ok { - return lk.Read(p, &r.readVal, &r.readPos) - } - return read(p, r.src, &r.readVal, &r.readPos) -} - -func read(p []byte, src Source, readVal *uint64, readPos *int8) (n int, err error) { - pos := *readPos - val := *readVal - rng, _ := src.(*PCGSource) - for n = 0; n < len(p); n++ { - if pos == 0 { - if rng != nil { - val = rng.Uint64() - } else { - val = src.Uint64() - } - pos = 8 - } - p[n] = byte(val) - val >>= 8 - pos-- - } - *readPos = pos - *readVal = val - return -} - -/* - * Top-level convenience functions - */ - -var globalRand = New(&LockedSource{src: *NewSource(1).(*PCGSource)}) - -// Type assert that globalRand's source is a LockedSource whose src is a PCGSource. -var _ PCGSource = globalRand.src.(*LockedSource).src - -// Seed uses the provided seed value to initialize the default Source to a -// deterministic state. If Seed is not called, the generator behaves as -// if seeded by Seed(1). -// Seed, unlike the Rand.Seed method, is safe for concurrent use. -func Seed(seed uint64) { globalRand.Seed(seed) } - -// Int63 returns a non-negative pseudo-random 63-bit integer as an int64 -// from the default Source. -func Int63() int64 { return globalRand.Int63() } - -// Uint32 returns a pseudo-random 32-bit value as a uint32 -// from the default Source. -func Uint32() uint32 { return globalRand.Uint32() } - -// Uint64 returns a pseudo-random 64-bit value as a uint64 -// from the default Source. -func Uint64() uint64 { return globalRand.Uint64() } - -// Int31 returns a non-negative pseudo-random 31-bit integer as an int32 -// from the default Source. -func Int31() int32 { return globalRand.Int31() } - -// Int returns a non-negative pseudo-random int from the default Source. -func Int() int { return globalRand.Int() } - -// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n) -// from the default Source. -// It panics if n <= 0. -func Int63n(n int64) int64 { return globalRand.Int63n(n) } - -// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n) -// from the default Source. -// It panics if n <= 0. -func Int31n(n int32) int32 { return globalRand.Int31n(n) } - -// Intn returns, as an int, a non-negative pseudo-random number in [0,n) -// from the default Source. -// It panics if n <= 0. -func Intn(n int) int { return globalRand.Intn(n) } - -// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0) -// from the default Source. -func Float64() float64 { return globalRand.Float64() } - -// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0) -// from the default Source. -func Float32() float32 { return globalRand.Float32() } - -// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) -// from the default Source. -func Perm(n int) []int { return globalRand.Perm(n) } - -// Shuffle pseudo-randomizes the order of elements using the default Source. -// n is the number of elements. Shuffle panics if n < 0. -// swap swaps the elements with indexes i and j. 
-func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) } - -// Read generates len(p) random bytes from the default Source and -// writes them into p. It always returns len(p) and a nil error. -// Read, unlike the Rand.Read method, is safe for concurrent use. -func Read(p []byte) (n int, err error) { return globalRand.Read(p) } - -// NormFloat64 returns a normally distributed float64 in the range -// [-math.MaxFloat64, +math.MaxFloat64] with -// standard normal distribution (mean = 0, stddev = 1) -// from the default Source. -// To produce a different normal distribution, callers can -// adjust the output using: -// -// sample = NormFloat64() * desiredStdDev + desiredMean -func NormFloat64() float64 { return globalRand.NormFloat64() } - -// ExpFloat64 returns an exponentially distributed float64 in the range -// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter -// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source. -// To produce a distribution with a different rate parameter, -// callers can adjust the output using: -// -// sample = ExpFloat64() / desiredRateParameter -func ExpFloat64() float64 { return globalRand.ExpFloat64() } - -// LockedSource is an implementation of Source that is concurrency-safe. -// A Rand using a LockedSource is safe for concurrent use. -// -// The zero value of LockedSource is valid, but should be seeded before use. -type LockedSource struct { - lk sync.Mutex - src PCGSource -} - -func (s *LockedSource) Uint64() (n uint64) { - s.lk.Lock() - n = s.src.Uint64() - s.lk.Unlock() - return -} - -func (s *LockedSource) Seed(seed uint64) { - s.lk.Lock() - s.src.Seed(seed) - s.lk.Unlock() -} - -// seedPos implements Seed for a LockedSource without a race condiiton. -func (s *LockedSource) seedPos(seed uint64, readPos *int8) { - s.lk.Lock() - s.src.Seed(seed) - *readPos = 0 - s.lk.Unlock() -} - -// Read implements Read for a LockedSource. -func (s *LockedSource) Read(p []byte, readVal *uint64, readPos *int8) (n int, err error) { - s.lk.Lock() - n, err = read(p, &s.src, readVal, readPos) - s.lk.Unlock() - return -} diff --git a/vendor/golang.org/x/exp/rand/rng.go b/vendor/golang.org/x/exp/rand/rng.go deleted file mode 100644 index 9b79108c..00000000 --- a/vendor/golang.org/x/exp/rand/rng.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rand - -import ( - "encoding/binary" - "io" - "math/bits" -) - -// PCGSource is an implementation of a 64-bit permuted congruential -// generator as defined in -// -// PCG: A Family of Simple Fast Space-Efficient Statistically Good -// Algorithms for Random Number Generation -// Melissa E. O’Neill, Harvey Mudd College -// http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf -// -// The generator here is the congruential generator PCG XSL RR 128/64 (LCG) -// as found in the software available at http://www.pcg-random.org/. -// It has period 2^128 with 128 bits of state, producing 64-bit values. -// Is state is represented by two uint64 words. -type PCGSource struct { - low uint64 - high uint64 -} - -const ( - maxUint32 = (1 << 32) - 1 - - multiplier = 47026247687942121848144207491837523525 - mulHigh = multiplier >> 64 - mulLow = multiplier & maxUint64 - - increment = 117397592171526113268558934119004209487 - incHigh = increment >> 64 - incLow = increment & maxUint64 - - // TODO: Use these? 
- initializer = 245720598905631564143578724636268694099 - initHigh = initializer >> 64 - initLow = initializer & maxUint64 -) - -// Seed uses the provided seed value to initialize the generator to a deterministic state. -func (pcg *PCGSource) Seed(seed uint64) { - pcg.low = seed - pcg.high = seed // TODO: What is right? -} - -// Uint64 returns a pseudo-random 64-bit unsigned integer as a uint64. -func (pcg *PCGSource) Uint64() uint64 { - pcg.multiply() - pcg.add() - // XOR high and low 64 bits together and rotate right by high 6 bits of state. - return bits.RotateLeft64(pcg.high^pcg.low, -int(pcg.high>>58)) -} - -func (pcg *PCGSource) add() { - var carry uint64 - pcg.low, carry = bits.Add64(pcg.low, incLow, 0) - pcg.high, _ = bits.Add64(pcg.high, incHigh, carry) -} - -func (pcg *PCGSource) multiply() { - hi, lo := bits.Mul64(pcg.low, mulLow) - hi += pcg.high * mulLow - hi += pcg.low * mulHigh - pcg.low = lo - pcg.high = hi -} - -// MarshalBinary returns the binary representation of the current state of the generator. -func (pcg *PCGSource) MarshalBinary() ([]byte, error) { - var buf [16]byte - binary.BigEndian.PutUint64(buf[:8], pcg.high) - binary.BigEndian.PutUint64(buf[8:], pcg.low) - return buf[:], nil -} - -// UnmarshalBinary sets the state of the generator to the state represented in data. -func (pcg *PCGSource) UnmarshalBinary(data []byte) error { - if len(data) < 16 { - return io.ErrUnexpectedEOF - } - pcg.low = binary.BigEndian.Uint64(data[8:]) - pcg.high = binary.BigEndian.Uint64(data[:8]) - return nil -} diff --git a/vendor/golang.org/x/exp/rand/zipf.go b/vendor/golang.org/x/exp/rand/zipf.go deleted file mode 100644 index f04c814e..00000000 --- a/vendor/golang.org/x/exp/rand/zipf.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// W.Hormann, G.Derflinger: -// "Rejection-Inversion to Generate Variates -// from Monotone Discrete Distributions" -// http://eeyore.wu-wien.ac.at/papers/96-04-04.wh-der.ps.gz - -package rand - -import "math" - -// A Zipf generates Zipf distributed variates. -type Zipf struct { - r *Rand - imax float64 - v float64 - q float64 - s float64 - oneminusQ float64 - oneminusQinv float64 - hxm float64 - hx0minusHxm float64 -} - -func (z *Zipf) h(x float64) float64 { - return math.Exp(z.oneminusQ*math.Log(z.v+x)) * z.oneminusQinv -} - -func (z *Zipf) hinv(x float64) float64 { - return math.Exp(z.oneminusQinv*math.Log(z.oneminusQ*x)) - z.v -} - -// NewZipf returns a Zipf variate generator. -// The generator generates values k ∈ [0, imax] -// such that P(k) is proportional to (v + k) ** (-s). -// Requirements: s > 1 and v >= 1. -func NewZipf(r *Rand, s float64, v float64, imax uint64) *Zipf { - z := new(Zipf) - if s <= 1.0 || v < 1 { - return nil - } - z.r = r - z.imax = float64(imax) - z.v = v - z.q = s - z.oneminusQ = 1.0 - z.q - z.oneminusQinv = 1.0 / z.oneminusQ - z.hxm = z.h(z.imax + 0.5) - z.hx0minusHxm = z.h(0.5) - math.Exp(math.Log(z.v)*(-z.q)) - z.hxm - z.s = 1 - z.hinv(z.h(1.5)-math.Exp(-z.q*math.Log(z.v+1.0))) - return z -} - -// Uint64 returns a value drawn from the Zipf distribution described -// by the Zipf object. 
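The files removed above vendored golang.org/x/exp/rand's 128-bit PCG generator. A minimal migration sketch, assuming callers can move to the standard library's math/rand/v2 (available since Go 1.22, in line with the toolchain bump elsewhere in this change); this is illustrative and not part of the diff:

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	// NewPCG takes two 64-bit seed words, mirroring the 128 bits of state
	// kept by the vendored PCGSource being deleted above.
	r := rand.New(rand.NewPCG(1, 2))

	fmt.Println(r.Uint64())  // full 64-bit value
	fmt.Println(r.IntN(10))  // unbiased value in [0,10), as Uint64n provided
	fmt.Println(r.Float64()) // value in [0.0,1.0)
	fmt.Println(r.Perm(5))   // pseudo-random permutation of [0,5)
}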
-func (z *Zipf) Uint64() uint64 { - if z == nil { - panic("rand: nil Zipf") - } - k := 0.0 - - for { - r := z.r.Float64() // r on [0,1] - ur := z.hxm + r*z.hx0minusHxm - x := z.hinv(ur) - k = math.Floor(x + 0.5) - if k-x <= z.s { - break - } - if ur >= z.h(k+0.5)-math.Exp(-math.Log(k+z.v)*z.q) { - break - } - } - return uint64(k) -} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index 22056825..2d748680 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -226,8 +226,9 @@ func (x *FileSyntax) Cleanup() { continue } if ww == 1 && len(stmt.RParen.Comments.Before) == 0 { - // Collapse block into single line. - line := &Line{ + // Collapse block into single line but keep the Line reference used by the + // parsed File structure. + *stmt.Line[0] = Line{ Comments: Comments{ Before: commentsAdd(stmt.Before, stmt.Line[0].Before), Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), @@ -235,7 +236,7 @@ func (x *FileSyntax) Cleanup() { }, Token: stringsAdd(stmt.Token, stmt.Line[0].Token), } - x.Stmt[w] = line + x.Stmt[w] = stmt.Line[0] w++ continue } @@ -876,6 +877,11 @@ func (in *input) parseLineBlock(start Position, token []string, lparen token) *L in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) case ')': rparen := in.lex() + // Don't preserve blank lines (denoted by a single empty comment, added above) + // at the end of the block. + if len(comments) == 1 && comments[0] == (Comment{}) { + comments = nil + } x.RParen.Before = comments x.RParen.Pos = rparen.pos if !in.peek().isEOL() { diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index 66dcaf98..3e4a1d0a 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -43,6 +43,7 @@ type File struct { Exclude []*Exclude Replace []*Replace Retract []*Retract + Tool []*Tool Syntax *FileSyntax } @@ -93,6 +94,12 @@ type Retract struct { Syntax *Line } +// A Tool is a single tool statement. +type Tool struct { + Path string + Syntax *Line +} + // A VersionInterval represents a range of versions with upper and lower bounds. // Intervals are closed: both bounds are included. 
When Low is equal to High, // the interval may refer to a single version ('v1.2.3') or an interval @@ -297,7 +304,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse }) } continue - case "module", "godebug", "require", "exclude", "replace", "retract": + case "module", "godebug", "require", "exclude", "replace", "retract", "tool": for _, l := range x.Line { f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) } @@ -509,6 +516,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a Syntax: line, } f.Retract = append(f.Retract, retract) + + case "tool": + if len(args) != 1 { + errorf("tool directive expects exactly one argument") + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Tool = append(f.Tool, &Tool{ + Path: s, + Syntax: line, + }) } } @@ -1567,6 +1589,36 @@ func (f *File) DropRetract(vi VersionInterval) error { return nil } +// AddTool adds a new tool directive with the given path. +// It does nothing if the tool line already exists. +func (f *File) AddTool(path string) error { + for _, t := range f.Tool { + if t.Path == path { + return nil + } + } + + f.Tool = append(f.Tool, &Tool{ + Path: path, + Syntax: f.Syntax.addLine(nil, "tool", path), + }) + + f.SortBlocks() + return nil +} + +// RemoveTool removes a tool directive with the given path. +// It does nothing if no such tool directive exists. +func (f *File) DropTool(path string) error { + for _, t := range f.Tool { + if t.Path == path { + t.Syntax.markRemoved() + *t = Tool{} + } + } + return nil +} + func (f *File) SortBlocks() { f.removeDups() // otherwise sorting is unsafe @@ -1593,9 +1645,9 @@ func (f *File) SortBlocks() { } } -// removeDups removes duplicate exclude and replace directives. +// removeDups removes duplicate exclude, replace and tool directives. // -// Earlier exclude directives take priority. +// Earlier exclude and tool directives take priority. // // Later replace directives take priority. // @@ -1605,10 +1657,10 @@ func (f *File) SortBlocks() { // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. func (f *File) removeDups() { - removeDups(f.Syntax, &f.Exclude, &f.Replace) + removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool) } -func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { +func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) { kill := make(map[*Line]bool) // Remove duplicate excludes. @@ -1649,6 +1701,24 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { } *replace = repl + if tool != nil { + haveTool := make(map[string]bool) + for _, t := range *tool { + if haveTool[t.Path] { + kill[t.Syntax] = true + continue + } + haveTool[t.Path] = true + } + var newTool []*Tool + for _, t := range *tool { + if !kill[t.Syntax] { + newTool = append(newTool, t) + } + } + *tool = newTool + } + // Duplicate require and retract directives are not removed. // Drop killed statements from the syntax tree. diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go index 8f54897c..5387d0c2 100644 --- a/vendor/golang.org/x/mod/modfile/work.go +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -331,5 +331,5 @@ func (f *WorkFile) SortBlocks() { // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. 
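The rule.go hunks above teach golang.org/x/mod/modfile the go.mod `tool` directive introduced with Go 1.24. A hedged sketch of the new API in use; the module path and tool path are placeholders:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte("module example.com/m\n\ngo 1.24\n")

	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		panic(err)
	}

	// AddTool is a no-op if the tool line already exists (see rule.go above),
	// and SortBlocks/removeDups keep the tool block deduplicated.
	if err := f.AddTool("golang.org/x/tools/cmd/stringer"); err != nil {
		panic(err)
	}

	out, err := f.Format()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // now contains: tool golang.org/x/tools/cmd/stringer
}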
func (f *WorkFile) removeDups() { - removeDups(f.Syntax, nil, &f.Replace) + removeDups(f.Syntax, nil, &f.Replace, nil) } diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index cac1a899..2a364b22 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -506,7 +506,6 @@ var badWindowsNames = []string{ "PRN", "AUX", "NUL", - "COM0", "COM1", "COM2", "COM3", @@ -516,7 +515,6 @@ var badWindowsNames = []string{ "COM7", "COM8", "COM9", - "LPT0", "LPT1", "LPT2", "LPT3", diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/net/LICENSE +++ b/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index cf66309c..db1c95fa 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. 
Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. 
+func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b8679..00000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
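With the rewrite above, golang.org/x/net/context becomes a thin set of type aliases and wrappers over the standard library, which is why the build-tagged go17/go19/pre-go17/pre-go19 variants are deleted in the hunks that follow. A small illustration, assuming a caller that still imports the legacy path:

package main

import (
	"context"
	"fmt"
	"time"

	xcontext "golang.org/x/net/context"
)

func main() {
	// xcontext.Context is a type alias for context.Context, so values flow
	// between the two packages without conversion.
	ctx, cancel := xcontext.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	var std context.Context = ctx // compiles because of the alias
	<-std.Done()
	fmt.Println(std.Err()) // context.DeadlineExceeded
}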
-func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a9..00000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3df..00000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. 
-var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. 
- child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a6380..00000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. 
- // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index 780968d6..e81b73e6 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -8,8 +8,8 @@ package http2 import ( "context" - "crypto/tls" "errors" + "net" "net/http" "sync" ) @@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) { // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. -func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { @@ -194,8 +194,8 @@ type addConnCall struct { err error } -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) +func (c *addConnCall) run(t *Transport, key string, nc net.Conn) { + cc, err := t.NewClientConn(nc) p := c.p p.mu.Lock() diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 00000000..ca645d9a --- /dev/null +++ b/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server.Transport} value. +// - If the resulting value is zero or out of range, use a default. 
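A hedged sketch of the precedence order listed above: fields set on the Go 1.24 net/http.Server.HTTP2 config win over the corresponding http2.Server fields, and anything left zero or out of range falls back to a package default. The values, address, and certificate paths below are illustrative only:

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	h1 := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{ // Go 1.24+: takes precedence when non-zero
			MaxConcurrentStreams: 500,
			PingTimeout:          10 * time.Second,
		},
	}
	h2 := &http2.Server{
		MaxConcurrentStreams: 250, // ignored: overridden by h1.HTTP2 above
		// MaxReadFrameSize left at zero: the package default is used.
	}
	// ConfigureServer wires h2 into h1; configFromServer (above) merges the two.
	if err := http2.ConfigureServer(h1, h2); err != nil {
		panic(err)
	}
	_ = h1.ListenAndServeTLS("cert.pem", "key.pem") // placeholder cert paths
}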
+type http2Config struct { + MaxConcurrentStreams uint32 + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} + +// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. +func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPServerConfig(&conf, h1) + setConfigDefaults(&conf, true) + return conf +} + +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). +func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. + if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. 
+ const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 00000000..5b516c55 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 00000000..060fd6c6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b27..97bd8b06 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. 
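The invalidHTTP1LookingFrameHeader helper added above gives Framer.ReadFrame (next hunk) a sentinel to compare against when a peer answers with plaintext HTTP/1.1 instead of HTTP/2 frames. An illustrative decoding of what those first nine bytes look like when read as a frame header:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/http2"
)

func main() {
	fh, err := http2.ReadFrameHeader(strings.NewReader("HTTP/1.1 200 OK\r\n"))
	if err != nil {
		panic(err)
	}
	// Length is the 24-bit big-endian value of "HTT", Type is 'P', Flags is '/',
	// and StreamID is "1.1 " with the reserved high bit cleared.
	fmt.Printf("len=%d type=%v flags=%#x stream=%d\n", fh.Length, fh.Type, fh.Flags, fh.StreamID)
}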
func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) @@ -1490,7 +1501,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1509,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f..6c18ea23 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -37,6 +38,15 @@ var ( logFrameWrites bool logFrameReads bool inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. 
+ disableExtendedConnectProtocol = true ) func init() { @@ -49,6 +59,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false + } } const ( @@ -140,6 +153,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -149,21 +166,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { @@ -237,13 +256,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +295,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +313,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. 
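The hunk above turns RFC 8441 extended CONNECT off by default and adds a GODEBUG opt-in. Because the package reads GODEBUG once in init, the variable has to be in the environment before the process starts; a hypothetical launcher (the binary name is a placeholder):

package main

import (
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("./my-h2-server") // placeholder binary
	// http2xconnect=1 re-enables extended CONNECT; http2debug=1 adds frame logging.
	cmd.Env = append(os.Environ(), "GODEBUG=http2xconnect=1,http2debug=1")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}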
+func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") @@ -358,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3e..51fca38f 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -49,13 +50,18 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +133,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. 
+ // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +211,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. - return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -336,7 +307,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -353,12 +324,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. 
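Note on the new server knobs: ReadIdleTimeout, PingTimeout and WriteByteTimeout are opt-in and default to the previous behaviour (no health-check PING, a 15-second ping grace period, no write timeout). A minimal sketch of enabling them through ConfigureServer; the address, handler and the concrete durations are illustrative assumptions, not part of this change.

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func newTLSServer(handler http.Handler) (*http.Server, error) {
	srv := &http.Server{Addr: ":8443", Handler: handler}
	err := http2.ConfigureServer(srv, &http2.Server{
		ReadIdleTimeout:  30 * time.Second, // send a keepalive PING after 30s without a frame
		PingTimeout:      15 * time.Second, // close the connection if the PING goes unanswered
		WriteByteTimeout: 10 * time.Second, // close the connection if writes make no progress
	})
	return srv, err
}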
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -440,13 +430,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +448,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +486,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +527,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
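The write path behind WriteByteTimeout (writeWithByteTimeout above, now wired into the server's buffered writer) fails a write only when a full timeout window passes without a single byte being accepted; any progress slides the deadline forward. A standalone sketch of the same idea on a bare net.Conn, assuming a blocking net.Conn with deadline support; the helper name is invented for illustration.

package main

import (
	"errors"
	"net"
	"os"
	"time"
)

// writeAll keeps extending the write deadline while at least one byte per
// window is accepted, and gives up once a window passes with no progress.
func writeAll(conn net.Conn, timeout time.Duration, p []byte) error {
	if timeout <= 0 {
		_, err := conn.Write(p)
		return err
	}
	n := 0
	for n < len(p) {
		_ = conn.SetWriteDeadline(time.Now().Add(timeout))
		nn, err := conn.Write(p[n:])
		n += nn
		if err != nil {
			if nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
				continue // made progress: extend the deadline and retry
			}
			_ = conn.SetWriteDeadline(time.Time{})
			return err
		}
	}
	return conn.SetWriteDeadline(time.Time{})
}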
@@ -569,7 +564,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +604,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +624,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +635,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +657,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -815,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } @@ -923,7 +920,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -935,20 +932,24 @@ func (sc *serverConn) serve() { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, - }, + write: settings, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +969,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +994,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1026,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1050,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1066,42 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
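When the health check gives up, handlePingTimer above reports "conn_close_lost_ping" through the connection's error counter before closing the connection. A sketch of surfacing that via the existing Server.CountError hook; the atomic counter and the durations are illustrative assumptions.

package main

import (
	"sync/atomic"
	"time"

	"golang.org/x/net/http2"
)

var lostPingCloses atomic.Int64

func newH2Server() *http2.Server {
	return &http2.Server{
		ReadIdleTimeout: 30 * time.Second, // arm the keepalive PING after 30s of silence
		PingTimeout:     15 * time.Second, // give the peer 15s to answer it
		CountError: func(errType string) {
			if errType == "conn_close_lost_ping" { // emitted by handlePingTimer above
				lostPingCloses.Add(1)
			}
		},
	}
}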
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1109,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1362,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1640,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -1757,6 +1808,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2160,7 +2214,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2182,19 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } - isConnect := rp.method == "CONNECT" + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.Protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) + } + + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // 
Malformed requests or responses that are detected @@ -2208,12 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") + } + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2222,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2238,83 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. 
- default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -2855,6 +2874,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3204,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. 
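Taken together, the :protocol plumbing in newWriterAndRequest and the new EnableFullDuplex method let a handler accept an extended CONNECT (RFC 8441) stream: the request arrives as CONNECT with a ":protocol" header, and the handler can read and write concurrently. A sketch under those assumptions; the "websocket" protocol value and the echo loop are illustrative, not part of this change.

package main

import (
	"io"
	"net/http"
)

func extendedConnectHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodConnect || r.Header.Get(":protocol") != "websocket" {
		http.Error(w, "expected extended CONNECT for websocket", http.StatusBadRequest)
		return
	}
	rc := http.NewResponseController(w)
	if err := rc.EnableFullDuplex(); err != nil { // satisfied by the no-op added above
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	_ = rc.Flush()
	// After the 200, the request body and the response body form the tunnel;
	// echo the peer's bytes back purely for illustration.
	_, _ = io.Copy(w, r.Body)
}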
@@ -3301,7 +3325,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 98a49c6b..f26356b9 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,8 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" - "sort" "strconv" "strings" "sync" @@ -36,6 +34,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -203,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -227,40 +240,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
// @@ -296,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -308,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. +type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -339,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -350,31 +368,55 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu 
sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + closedOnIdle bool // true if conn was closed for idleness + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. 
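The "unencrypted_http2" TLSNextProto key and the unencryptedTransport wrapper above are the hand-off points net/http uses for HTTP/2 over cleartext TCP. A sketch of opting in from the standard-library side, assuming Go 1.24's http.Protocols API; whether http:// requests actually go out as HTTP/2 depends on the protocol set, and the setup below is an assumption rather than part of this diff.

package main

import "net/http"

func newCleartextH2Client() *http.Client {
	tr := &http.Transport{}
	protos := new(http.Protocols)
	// With only UnencryptedHTTP2 enabled, http:// requests are sent as
	// HTTP/2 over plain TCP and handed to this package through the
	// "unencrypted_http2" key shown above.
	protos.SetUnencryptedHTTP2(true)
	tr.Protocols = protos
	return &http.Client{Transport: tr}
}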
@@ -432,12 +474,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -499,6 +541,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +551,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -554,6 +584,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -586,7 +618,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -597,7 +636,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -622,6 +661,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. 
+ // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -640,9 +695,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -758,44 +814,38 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + seenSettingsChan: make(chan struct{}), + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,30 +857,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. 
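On the client, per-connection limits and timeouts now flow through the same merged configuration as on the server (note the conf.* reads in newClientConn and the MaxResponseHeaderBytes-aware maxHeaderListSize earlier in this file). A sketch of a Transport exercising the knobs involved; the concrete values are illustrative assumptions.

package main

import (
	"time"

	"golang.org/x/net/http2"
)

func newH2Transport() *http2.Transport {
	return &http2.Transport{
		MaxReadFrameSize: 1 << 20,          // advertised via SETTINGS_MAX_FRAME_SIZE
		ReadIdleTimeout:  30 * time.Second, // client-side keepalive PING
		PingTimeout:      15 * time.Second,
		WriteByteTimeout: 10 * time.Second, // uses the shared writeWithByteTimeout path
	}
}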
cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -838,11 +883,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -852,8 +895,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -871,7 +914,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -999,7 +1042,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -1031,16 +1074,40 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. 
+ maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1053,7 +1120,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1091,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1207,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1235,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. 
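currentRequestCountLocked above changes what "full" means for a connection: a stream we reset locally keeps holding a concurrency slot until the PING bundled with its RST_STREAM is acknowledged, so an unresponsive peer cannot absorb an unbounded number of new requests. A standalone illustration of that accounting; the type and method names only mirror the fields above and are not the vendored code.

package main

// connSlots mirrors the quantities ClientConn now counts against
// SETTINGS_MAX_CONCURRENT_STREAMS.
type connSlots struct {
	streams         int // open client-initiated streams
	streamsReserved int // ReserveNewRequest slots not yet turned into streams
	pendingResets   int // RST_STREAMs sent whose bundled PING is unacknowledged
}

func (s connSlots) currentRequestCount() int {
	return s.streams + s.streamsReserved + s.pendingResets
}

// canTakeNewRequest reports whether one more request fits under the peer's
// advertised concurrency limit.
func (s connSlots) canTakeNewRequest(maxConcurrentStreams int) bool {
	return s.currentRequestCount() < maxConcurrentStreams
}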
-func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. @@ -1296,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true - } + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1415,6 +1432,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1426,8 +1445,11 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true } // Acquire the new-request lock by writing to reqHeaderMu. 
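From the caller's side, writeRequest treats any CONNECT carrying a ":protocol" header as extended CONNECT (the check above); as the next hunk shows, it then waits for the server's first SETTINGS frame and fails with errExtendedConnectNotSupported if SETTINGS_ENABLE_CONNECT_PROTOCOL was never received. A sketch of issuing such a request; the URL, the "websocket" value and the pipe-based tunnel are illustrative assumptions.

package main

import (
	"io"
	"net/http"

	"golang.org/x/net/http2"
)

func dialExtendedConnect(tr *http2.Transport, url string) (io.WriteCloser, *http.Response, error) {
	pr, pw := io.Pipe()
	req, err := http.NewRequest(http.MethodConnect, url, pr)
	if err != nil {
		return nil, nil, err
	}
	req.Header.Set(":protocol", "websocket") // the marker writeRequest checks for
	resp, err := tr.RoundTrip(req)
	if err != nil {
		// Includes errExtendedConnectNotSupported when the peer never
		// advertised SETTINGS_ENABLE_CONNECT_PROTOCOL.
		return nil, nil, err
	}
	// pw feeds the request half of the tunnel; resp.Body is the other half.
	return pw, resp, nil
}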
@@ -1436,6 +1458,18 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1574,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) + cc.hbuf.Reset() + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) - if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. - endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -1617,6 +1664,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1641,16 +1689,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. 
+ ping := false + if !closeOnIdle { + cc.mu.Lock() + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + } + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1672,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -1947,214 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -// requires cc.wmu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' 
character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if req.Method != "CONNECT" { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. - for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). - return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. 
-func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - // requires cc.wmu be held. func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() @@ -2171,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2203,7 +2076,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2219,10 +2092,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2282,7 +2155,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2306,6 +2178,27 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2317,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. 
+ cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2349,7 +2249,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) @@ -2363,7 +2263,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2415,7 +2315,7 @@ func (rl *clientConnReadLoop) run() error { } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2503,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2511,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2533,15 +2433,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. 
+ limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -2616,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -2725,7 +2644,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2860,9 +2779,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2956,6 +2888,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -2973,6 +2920,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. 
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -2981,7 +2929,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3010,7 +2958,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3085,6 +3033,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3107,20 +3061,27 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3267,7 +3228,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() @@ -3304,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 00000000..b2de2116 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. +func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398..fdb35b94 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -131,6 +132,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { @@ -341,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 00000000..ed14da5a --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. 
+func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 74% rename from vendor/golang.org/x/net/http2/headermap.go rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd2..92483d8e 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http2 +package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,13 +82,15 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } -func lowerHeader(v string) (lower string, ascii bool) { +// LowerHeader returns the lowercase form of a header name, +// used on the wire for HTTP/2 and HTTP/3 requests. +func LowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s, true @@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) { return asciiToLower(v) } -func canonicalHeader(v string) string { +// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".) +func CanonicalHeader(v string) string { buildCommonHeaderMapsOnce() if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) +} + +// CachedCanonicalHeader returns the canonical form of a well-known header name. +func CachedCanonicalHeader(v string) (string, bool) { + buildCommonHeaderMapsOnce() + s, ok := commonCanonHeader[v] + return s, ok } diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go new file mode 100644 index 00000000..4b705531 --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -0,0 +1,467 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import ( + "context" + "errors" + "fmt" + "net/http/httptrace" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +var ( + ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") +) + +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. 
+type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + +// EncodeHeadersParam is parameters to EncodeHeaders. +type EncodeHeadersParam struct { + Request Request + + // AddGzipHeader indicates that an "accept-encoding: gzip" header should be + // added to the request. + AddGzipHeader bool + + // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting. + PeerMaxHeaderListSize uint64 + + // DefaultUserAgent is the User-Agent header to send when the request + // neither contains a User-Agent nor disables it. + DefaultUserAgent string +} + +// EncodeHeadersParam is the result of EncodeHeaders. +type EncodeHeadersResult struct { + HasBody bool + HasTrailers bool +} + +// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3. +// It validates a request and calls headerf with each pseudo-header and header +// for the request. +// The headerf function is called with the validated, canonicalized header name. +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { + req := param.Request + + // Check for invalid connection-level headers. + if err := checkConnHeaders(req.Header); err != nil { + return res, err + } + + if req.URL == nil { + return res, errors.New("Request.URL is nil") + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return res, err + } + if !httpguts.ValidHostHeader(host) { + return res, errors.New("invalid Host header") + } + + // isNormalConnect is true if this is a non-extended CONNECT request. + isNormalConnect := false + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. + var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + trailers, err := commaSeparatedTrailers(req.Trailer) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). 
+ f(":authority", host) + m := req.Method + if m == "" { + m = "GET" + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) + } + if param.AddGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", param.DefaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + if param.PeerMaxHeaderListSize > 0 { + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > param.PeerMaxHeaderListSize { + return res, ErrRequestHeaderListSize + } + } + + trace := httptrace.ContextClientTrace(ctx) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := LowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + return + } + + headerf(name, value) + + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(name, []string{value}) + } + }) + + res.HasBody = req.ActualContentLength != 0 + res.HasTrailers = trailers != "" + return res, nil +} + +// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header +// for a request. +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? 
+ if !disableCompression && + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + return true + } + return false +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// +// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3 +// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1 +// +// Certain headers are special-cased as okay but not transmitted later. +// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. +func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) + } + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) + } + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("invalid Connection request header: %q", vv) + } + return nil +} + +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { + k = CanonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +func validateHeaders(hdrs map[string][]string) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +// shouldSendReqContentLength reports whether we should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. 
+func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. + InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." 
+ // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go index cebde763..3c9576e2 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go index cebde763..3c9576e2 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index d7d4b8b6..32bdf435 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -7,6 +7,7 @@ package proxy import ( "context" "net" + "net/netip" "strings" ) @@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net. 
} func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { + if nip, err := netip.ParseAddr(host); err == nil { + ip := net.IP(nip.AsSlice()) for _, net := range p.bypassNetworks { if net.Contains(ip) { return p.bypass @@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) { } continue } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) + if nip, err := netip.ParseAddr(host); err == nil { + p.AddIP(net.IP(nip.AsSlice())) continue } if strings.HasPrefix(host, "*.") { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 923a5780..3448d203 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -6,9 +6,10 @@ // as specified in RFC 6455. // // This package currently lacks some features found in an alternative -// and more actively maintained WebSocket package: +// and more actively maintained WebSocket packages: // -// https://pkg.go.dev/nhooyr.io/websocket +// - [github.com/gorilla/websocket] +// - [github.com/coder/websocket] package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c2..48dbb9d8 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. 
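Illustrative sketch (not part of the patch): the per_host.go hunk above swaps net.ParseIP for netip.ParseAddr and converts back with net.IP(nip.AsSlice()) before matching bypass networks. A minimal standalone example of that parse-and-convert pattern, using hypothetical inputs, might look like this:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// Hypothetical dial targets a per-host proxy matcher could receive.
	hosts := []string{"10.1.2.3", "2001:db8::1", "internal.example.com"}
	_, bypassNet, _ := net.ParseCIDR("10.0.0.0/8")

	for _, host := range hosts {
		nip, err := netip.ParseAddr(host)
		if err != nil {
			// Not an IP literal: a matcher would fall back to hostname rules here.
			fmt.Printf("%-22s not an IP literal\n", host)
			continue
		}
		// Same conversion the hunk uses before *net.IPNet.Contains.
		ip := net.IP(nip.AsSlice())
		fmt.Printf("%-22s bypassed: %v\n", host, bypassNet.Contains(ip))
	}
}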
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go deleted file mode 100644 index d28140f7..00000000 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine - -package internal - -import "google.golang.org/appengine/urlfetch" - -func init() { - appengineClientHook = urlfetch.Client -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index 572074a6..b9db01dd 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -18,16 +18,11 @@ var HTTPClient ContextKey // because nobody else can create a ContextKey, being unexported. type ContextKey struct{} -var appengineClientHook func(context.Context) *http.Client - func ContextClient(ctx context.Context) *http.Client { if ctx != nil { if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { return hc } } - if appengineClientHook != nil { - return appengineClientHook(ctx) - } return http.DefaultClient } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 90a2c3d6..eacdd7fd 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is @@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) { if tf.refreshToken != tk.RefreshToken { tf.refreshToken = tk.RefreshToken } - return tk, err + return tk, nil } // reuseTokenSource is a TokenSource that holds a single token in memory @@ -356,11 +356,15 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { return internal.ContextClient(ctx) } + cc := internal.ContextClient(ctx) return &http.Client{ Transport: &Transport{ - Base: internal.ContextClient(ctx).Transport, + Base: cc.Transport, Source: ReuseTokenSource(nil, src), }, + CheckRedirect: cc.CheckRedirect, + Jar: cc.Jar, + Timeout: cc.Timeout, } } @@ -393,7 +397,7 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the // TokenSource returned by ReuseTokenSource, except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index 50593b6d..6a95da97 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -21,7 +21,7 @@ const ( // // A fresh verifier should be generated for each authorization. // S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange // (or Config.DeviceAccessToken). 
func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321..8c31136c 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} @@ -162,7 +169,7 @@ func tokenFromInternal(t *internal.Token) *Token { // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. +// with an error. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee6..f8c3c092 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -18,7 +18,7 @@ import ( type token struct{} // A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. +// the same overall task. A Group should not be reused for different tasks. // // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -61,6 +61,7 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// The first call to Go must happen before a Wait. // It blocks until the new goroutine can be added without the number of // active goroutines in the group exceeding the configured limit. // @@ -118,6 +119,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. 
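Illustrative sketch (not part of the patch): the errgroup doc changes above tighten the usage contract — a Group must not be reused for different tasks, the first Go call must precede Wait, and SetLimit(0) blocks any new goroutines. A minimal example of the intended pattern, with a placeholder work function assumed for this sketch, might look like:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// doWork stands in for real per-item work; it is an assumption for this sketch.
func doWork(ctx context.Context, item string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // negative means no limit; zero would block every Go call

	for _, item := range []string{"a", "b", "c"} {
		item := item // pin the loop variable for the closure (pre-Go 1.22 semantics)
		g.Go(func() error { // all Go calls happen before Wait
			return doWork(ctx, item)
		})
	}
	// Wait returns the first non-nil error; the Group is not reused afterwards.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}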
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b..00000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce3343..00000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 02609d5b..2e73ee19 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,6 +72,9 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } @@ -146,6 +149,18 @@ var ARM struct { _ CacheLinePad } +// The booleans in Loong64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +var Loong64 struct { + _ CacheLinePad + HasLSX bool // support 128-bit vector extension + HasLASX bool // support 256-bit vector extension + HasCRC32 bool // support CRC instruction + HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction + HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction + _ CacheLinePad +} + // MIPS64X contains the supported CPU features of the current mips64/mips64le // platforms. If the current platform is not mips64/mips64le or the current // operating system is not Linux then all feature flags are false. diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go new file mode 100644 index 00000000..4f341143 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel. +const ( + hwcap_LOONGARCH_LSX = 1 << 4 + hwcap_LOONGARCH_LASX = 1 << 5 +) + +func doinit() { + // TODO: Features that require kernel support like LSX and LASX can + // be detected here once needed in std library or by the compiler. 
+ Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX) + Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX) +} + +func hwcIsSet(hwc uint, val uint) bool { + return hwc&val != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index 7d902b68..a428dec9 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 +//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go index 55863585..45ecb29a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -8,5 +8,43 @@ package cpu const cacheLineSize = 64 +// Bit fields for CPUCFG registers, Related reference documents: +// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg +const ( + // CPUCFG1 bits + cpucfg1_CRC32 = 1 << 25 + + // CPUCFG2 bits + cpucfg2_LAM_BH = 1 << 27 + cpucfg2_LAMCAS = 1 << 28 +) + func initOptions() { + options = []option{ + {Name: "lsx", Feature: &Loong64.HasLSX}, + {Name: "lasx", Feature: &Loong64.HasLASX}, + {Name: "crc32", Feature: &Loong64.HasCRC32}, + {Name: "lam_bh", Feature: &Loong64.HasLAM_BH}, + {Name: "lamcas", Feature: &Loong64.HasLAMCAS}, + } + + // The CPUCFG data on Loong64 only reflects the hardware capabilities, + // not the kernel support status, so features such as LSX and LASX that + // require kernel support cannot be obtained from the CPUCFG data. + // + // These features only require hardware capability support and do not + // require kernel specific support, so they can be obtained directly + // through CPUCFG + cfg1 := get_cpucfg(1) + cfg2 := get_cpucfg(2) + + Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32) + Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS) + Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH) +} + +func get_cpucfg(reg uint32) uint32 + +func cfgIsSet(cfg uint32, val uint32) bool { + return cfg&val != 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/vendor/golang.org/x/sys/cpu/cpu_loong64.s new file mode 100644 index 00000000..71cbaf1c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.s @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +// func get_cpucfg(reg uint32) uint32 +TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0 + MOVW reg+0(FP), R5 + // CPUCFG R5, R4 = 0x00006ca4 + WORD $0x00006ca4 + MOVW R4, ret+8(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 600a6807..1e642f33 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,6 +53,9 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -106,7 +109,7 @@ func archInit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -134,14 +137,24 @@ func archInit() { X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = isSet(22, edx7) + + // These features depend on the second level of extended features. + if eax7 >= 1 { + eax71, _, _, edx71 := cpuid(7, 1) + if X86.HasAVX512 { + X86.HasAVX512BF16 = isSet(5, eax71) + } + if X86.HasAVX { + X86.HasAVXIFMA = isSet(23, eax71) + X86.HasAVXVNNI = isSet(4, eax71) + X86.HasAVXVNNIInt8 = isSet(4, edx71) + } + } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go index 762b63d6..56a7e1a1 100644 --- a/vendor/golang.org/x/sys/cpu/parse.go +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -13,7 +13,7 @@ import "strconv" // https://golang.org/cl/209597. func parseRelease(rel string) (major, minor, patch int, ok bool) { // Strip anything after a dash or plus. - for i := 0; i < len(rel); i++ { + for i := range len(rel) { if rel[i] == '-' || rel[i] == '+' { rel = rel[:i] break @@ -21,7 +21,7 @@ func parseRelease(rel string) (major, minor, patch int, ok bool) { } next := func() (int, bool) { - for i := 0; i < len(rel); i++ { + for i := range len(rel) { if rel[i] == '.' { ver, err := strconv.Atoi(rel[:i]) rel = rel[i+1:] diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 00000000..37a82528 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. 
+func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 00000000..1200487f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 099867de..798f61ad 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,7 +602,150 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) +// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) +const minIovec = 8 + +func Readv(fd int, iovs [][]byte) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = preadv(fd, iovecs, offset) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwritev(fd, iovecs, offset) + writevRacedetect(iovecs, n) + return n, err +} + +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) + if len(b) > 0 { + v.Base = &b[0] + } else { + v.Base = (*byte)(unsafe.Pointer(&_zero)) + } + vecs = append(vecs, v) + } + return vecs +} + +func writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 
&& i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + +func darwinMajorMinPatch() (maj, min, patch int, err error) { + var un Utsname + err = Uname(&un) + if err != nil { + return + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range un.Release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return 0, 0, 0, ENOTSUP + } + case b == 0: + break Loop + default: + return 0, 0, 0, ENOTSUP + } + } + if c != 2 { + return 0, 0, 0, ENOTSUP + } + return mmp[0], mmp[1], mmp[2], nil +} + +func darwinKernelVersionMin(maj, min, patch int) bool { + actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() + if err != nil { + return false + } + return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) @@ -705,3 +848,7 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys readv(fd int, iovecs []Iovec) (n int, err error) +//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) +//sys writev(fd int, iovecs []Iovec) (n int, err error) +//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f..be8c0020 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 230a9454..4958a657 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "slices" "strconv" "syscall" "time" @@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { return nil, 0, EINVAL } sa.raw.Family = AF_UNIX - for i := 0; i < n; i++ { + for i := range n { sa.raw.Path[i] = int8(name[i]) } // length is family (uint16), name, NUL. 
@@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm)) psm[0] = byte(sa.PSM) psm[1] = byte(sa.PSM >> 8) - for i := 0; i < len(sa.Addr); i++ { + for i := range len(sa.Addr) { sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i] } cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid)) @@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i] = rx[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+4] = tx[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil @@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) n := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Addr[i] = n[i] } p := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+8] = p[i] } sa.raw.Addr[12] = sa.Addr @@ -911,7 +912,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { // These are EBCDIC encoded by the kernel, but we still need to pad them // with blanks. Initializing with blanks allows the caller to feed in either // a padded or an unpadded string. - for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Nodeid[i] = ' ' sa.raw.User_id[i] = ' ' sa.raw.Name[i] = ' ' @@ -1148,7 +1149,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { var user [8]byte var name [8]byte - for i := 0; i < 8; i++ { + for i := range 8 { user[i] = byte(pp.User_id[i]) name[i] = byte(pp.Name[i]) } @@ -1173,11 +1174,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } name := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { name[i] = pp.Addr[i] } pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { pgn[i] = pp.Addr[i+8] } addr := (*[1]byte)(unsafe.Pointer(&sa.Addr)) @@ -1188,11 +1189,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { rx[i] = pp.Addr[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { tx[i] = pp.Addr[i+4] } return sa, nil @@ -2216,10 +2217,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2270,10 +2268,7 @@ func writevRacedetect(iovecs []Iovec, n int) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceReadRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2320,12 +2315,7 @@ func isGroupMember(gid int) bool { return false } - for _, g := range groups { - if g == gid { - return true - } - } - return false + return slices.Contains(groups, gid) } func isCapDacOverrideSet() bool { diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af0..abc39554 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. +func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6ebc48b3..4f432bfe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1330,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1551,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1623,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY 
= 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1867,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1967,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2083,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2163,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2491,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2499,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2525,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2592,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2602,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2911,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2920,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c0d45e32..75207613 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -304,6 +306,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c731d24f..c68acda5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ 
-305,6 +307,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 680018a4..a8c607ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -310,6 +312,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a63909f3..18563dd8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,6 +109,7 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -119,6 +120,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -302,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9b0a2573..22912cda 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -297,6 +299,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 958e6e06..29344eb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 50c7f25b..20d51fb9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ced21d66..321b6090 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 226c0441..9bacdf1e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 3122737c..c2242726 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -358,6 +360,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb5d3467..6270c8ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e921ebc6..9966c194 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 38ba81c5..848e5fcc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -294,6 +296,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 71f04009..669b2adb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -366,6 +368,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c44a3133..4834e575 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -357,6 +359,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 24b346e1..813c05b6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if 
len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index ebd21310..fda32858 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 824b9c2d..e6f58f3c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 4f178a22..7f8998b9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87fe..c6545413 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820..c79aaff3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf4..5eb45069 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3..05e50297 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe..38c53ec5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017d..31d2e71a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1e..f4184a33 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5..05b99622 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60..43a256e9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 60658d6a..eea5ddfc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7..0d777bfb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d..b4463650 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 
SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416d..0c7d21c1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e76..84053916 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825..fcf1b790 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77..52d15b5f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 5537148d..a46abe64 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4747,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14c + NL80211_ATTR_MAX = 0x14d NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5519,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6174,3 +6174,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf6..3ca814f5 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. 
To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index fd863244..39aeeb64 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -164,7 +164,12 @@ loopItems: func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { var h syscall.Handle var d uint32 - err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + var pathPointer *uint16 + pathPointer, err = syscall.UTF16PtrFromString(path) + if err != nil { + return 0, false, err + } + err = regCreateKeyEx(syscall.Handle(k), pathPointer, 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) if err != nil { return 0, false, err @@ -174,7 +179,11 @@ func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool // DeleteKey deletes the subkey path of key k and its values. func DeleteKey(k Key, path string) error { - return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) + pathPointer, err := syscall.UTF16PtrFromString(path) + if err != nil { + return err + } + return regDeleteKey(syscall.Handle(k), pathPointer) } // A KeyInfo describes the statistics of a key. It is returned by Stat. diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index 74db26b9..a1bcbb23 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -340,7 +340,11 @@ func (k Key) SetBinaryValue(name string, value []byte) error { // DeleteValue removes a named value from the key k. func (k Key) DeleteValue(name string) error { - return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) + namePointer, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + return regDeleteValue(syscall.Handle(k), namePointer) } // ReadValueNames returns the value names of key k. 
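The registry, eventlog, and svc/mgr changes that follow all share one motivation: syscall.StringToUTF16Ptr panics when its argument contains a NUL byte, whereas syscall.UTF16PtrFromString reports the problem as an error the caller can return. A rough, Windows-only sketch of the difference; the key path is an illustrative value, not taken from this diff.

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// A malformed or attacker-controlled string with an embedded NUL.
	bad := "SOFTWARE\\Example\x00Key" // illustrative value only

	// UTF16PtrFromString refuses it with an error (syscall.EINVAL)...
	if _, err := syscall.UTF16PtrFromString(bad); err != nil {
		fmt.Println("rejected:", err)
	}

	// ...while the old StringToUTF16Ptr would panic on the same input,
	// which is why the vendored code above now threads the error through
	// instead of calling it.
}
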
diff --git a/vendor/golang.org/x/sys/windows/svc/eventlog/log.go b/vendor/golang.org/x/sys/windows/svc/eventlog/log.go index f279444d..ad40c2f4 100644 --- a/vendor/golang.org/x/sys/windows/svc/eventlog/log.go +++ b/vendor/golang.org/x/sys/windows/svc/eventlog/log.go @@ -29,11 +29,19 @@ func OpenRemote(host, source string) (*Log, error) { if source == "" { return nil, errors.New("Specify event log source") } - var s *uint16 + var hostPointer *uint16 if host != "" { - s = syscall.StringToUTF16Ptr(host) + var err error + hostPointer, err = syscall.UTF16PtrFromString(host) + if err != nil { + return nil, err + } } - h, err := windows.RegisterEventSource(s, syscall.StringToUTF16Ptr(source)) + sourcePointer, err := syscall.UTF16PtrFromString(source) + if err != nil { + return nil, err + } + h, err := windows.RegisterEventSource(hostPointer, sourcePointer) if err != nil { return nil, err } @@ -46,7 +54,11 @@ func (l *Log) Close() error { } func (l *Log) report(etype uint16, eid uint32, msg string) error { - ss := []*uint16{syscall.StringToUTF16Ptr(msg)} + msgPointer, err := syscall.UTF16PtrFromString(msg) + if err != nil { + return err + } + ss := []*uint16{msgPointer} return windows.ReportEvent(l.Handle, etype, 0, eid, 0, 1, 0, &ss[0], nil) } diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/config.go b/vendor/golang.org/x/sys/windows/svc/mgr/config.go index 3c7ba08f..ec01f96e 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/config.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/config.go @@ -121,7 +121,11 @@ func (s *Service) Config() (Config, error) { } func updateDescription(handle windows.Handle, desc string) error { - d := windows.SERVICE_DESCRIPTION{Description: toPtr(desc)} + descPointer, err := toPtr(desc) + if err != nil { + return err + } + d := windows.SERVICE_DESCRIPTION{Description: descPointer} return windows.ChangeServiceConfig2(handle, windows.SERVICE_CONFIG_DESCRIPTION, (*byte)(unsafe.Pointer(&d))) } @@ -141,10 +145,30 @@ func updateStartUp(handle windows.Handle, isDelayed bool) error { // UpdateConfig updates service s configuration parameters. 
func (s *Service) UpdateConfig(c Config) error { - err := windows.ChangeServiceConfig(s.Handle, c.ServiceType, c.StartType, - c.ErrorControl, toPtr(c.BinaryPathName), toPtr(c.LoadOrderGroup), - nil, toStringBlock(c.Dependencies), toPtr(c.ServiceStartName), - toPtr(c.Password), toPtr(c.DisplayName)) + binaryPathNamePointer, err := toPtr(c.BinaryPathName) + if err != nil { + return err + } + loadOrderGroupPointer, err := toPtr(c.LoadOrderGroup) + if err != nil { + return err + } + serviceStartNamePointer, err := toPtr(c.ServiceStartName) + if err != nil { + return err + } + passwordPointer, err := toPtr(c.Password) + if err != nil { + return err + } + displayNamePointer, err := toPtr(c.DisplayName) + if err != nil { + return err + } + err = windows.ChangeServiceConfig(s.Handle, c.ServiceType, c.StartType, + c.ErrorControl, binaryPathNamePointer, loadOrderGroupPointer, + nil, toStringBlock(c.Dependencies), serviceStartNamePointer, + passwordPointer, displayNamePointer) if err != nil { return err } diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go index dbfd729f..74755940 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go @@ -34,7 +34,11 @@ func Connect() (*Mgr, error) { func ConnectRemote(host string) (*Mgr, error) { var s *uint16 if host != "" { - s = syscall.StringToUTF16Ptr(host) + var err error + s, err = syscall.UTF16PtrFromString(host) + if err != nil { + return nil, err + } } h, err := windows.OpenSCManager(s, nil, windows.SC_MANAGER_ALL_ACCESS) if err != nil { @@ -78,11 +82,11 @@ func (m *Mgr) LockStatus() (*LockStatus, error) { } } -func toPtr(s string) *uint16 { +func toPtr(s string) (*uint16, error) { if len(s) == 0 { - return nil + return nil, nil } - return syscall.StringToUTF16Ptr(s) + return syscall.UTF16PtrFromString(s) } // toStringBlock terminates strings in ss with 0, and then @@ -122,10 +126,34 @@ func (m *Mgr) CreateService(name, exepath string, c Config, args ...string) (*Se for _, v := range args { s += " " + syscall.EscapeArg(v) } - h, err := windows.CreateService(m.Handle, toPtr(name), toPtr(c.DisplayName), + namePointer, err := toPtr(name) + if err != nil { + return nil, err + } + displayNamePointer, err := toPtr(c.DisplayName) + if err != nil { + return nil, err + } + sPointer, err := toPtr(s) + if err != nil { + return nil, err + } + loadOrderGroupPointer, err := toPtr(c.LoadOrderGroup) + if err != nil { + return nil, err + } + serviceStartNamePointer, err := toPtr(c.ServiceStartName) + if err != nil { + return nil, err + } + passwordPointer, err := toPtr(c.Password) + if err != nil { + return nil, err + } + h, err := windows.CreateService(m.Handle, namePointer, displayNamePointer, windows.SERVICE_ALL_ACCESS, c.ServiceType, - c.StartType, c.ErrorControl, toPtr(s), toPtr(c.LoadOrderGroup), - nil, toStringBlock(c.Dependencies), toPtr(c.ServiceStartName), toPtr(c.Password)) + c.StartType, c.ErrorControl, sPointer, loadOrderGroupPointer, + nil, toStringBlock(c.Dependencies), serviceStartNamePointer, passwordPointer) if err != nil { return nil, err } @@ -159,7 +187,12 @@ func (m *Mgr) CreateService(name, exepath string, c Config, args ...string) (*Se // OpenService retrieves access to service name, so it can // be interrogated and controlled. 
func (m *Mgr) OpenService(name string) (*Service, error) { - h, err := windows.OpenService(m.Handle, syscall.StringToUTF16Ptr(name), windows.SERVICE_ALL_ACCESS) + namePointer, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, err + } + + h, err := windows.OpenService(m.Handle, namePointer, windows.SERVICE_ALL_ACCESS) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go b/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go index ef2a6878..1a073748 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go @@ -99,8 +99,13 @@ func (s *Service) ResetPeriod() (uint32, error) { // SetRebootMessage sets service s reboot message. // If msg is "", the reboot message is deleted and no message is broadcast. func (s *Service) SetRebootMessage(msg string) error { + msgPointer, err := syscall.UTF16PtrFromString(msg) + if err != nil { + return err + } + rActions := windows.SERVICE_FAILURE_ACTIONS{ - RebootMsg: syscall.StringToUTF16Ptr(msg), + RebootMsg: msgPointer, } return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions))) } @@ -118,8 +123,13 @@ func (s *Service) RebootMessage() (string, error) { // SetRecoveryCommand sets the command line of the process to execute in response to the RunCommand service controller action. // If cmd is "", the command is deleted and no program is run when the service fails. func (s *Service) SetRecoveryCommand(cmd string) error { + cmdPointer, err := syscall.UTF16PtrFromString(cmd) + if err != nil { + return err + } + rActions := windows.SERVICE_FAILURE_ACTIONS{ - Command: syscall.StringToUTF16Ptr(cmd), + Command: cmdPointer, } return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions))) } diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/service.go b/vendor/golang.org/x/sys/windows/svc/mgr/service.go index c9740ef0..9f59eab7 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/service.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/service.go @@ -37,7 +37,11 @@ func (s *Service) Start(args ...string) error { if len(args) > 0 { vs := make([]*uint16, len(args)) for i := range vs { - vs[i] = syscall.StringToUTF16Ptr(args[i]) + argPointer, err := syscall.UTF16PtrFromString(args[i]) + if err != nil { + return err + } + vs[i] = argPointer } p = &vs[0] } diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go index c4f74924..a9b1c192 100644 --- a/vendor/golang.org/x/sys/windows/svc/service.go +++ b/vendor/golang.org/x/sys/windows/svc/service.go @@ -132,10 +132,10 @@ type ctlEvent struct { // service provides access to windows service api. type service struct { - name string - h windows.Handle - c chan ctlEvent - handler Handler + namePointer *uint16 + h windows.Handle + c chan ctlEvent + handler Handler } type exitCode struct { @@ -209,7 +209,7 @@ var theService service // This is, unfortunately, a global, which means only one // serviceMain is the entry point called by the service manager, registered earlier by // the call to StartServiceCtrlDispatcher. 
func serviceMain(argc uint32, argv **uint16) uintptr { - handle, err := windows.RegisterServiceCtrlHandlerEx(windows.StringToUTF16Ptr(theService.name), ctlHandlerCallback, 0) + handle, err := windows.RegisterServiceCtrlHandlerEx(theService.namePointer, ctlHandlerCallback, 0) if sysErr, ok := err.(windows.Errno); ok { return uintptr(sysErr) } else if err != nil { @@ -280,15 +280,21 @@ loop: // Run executes service name by calling appropriate handler function. func Run(name string, handler Handler) error { + // Check to make sure that the service name is valid. + namePointer, err := windows.UTF16PtrFromString(name) + if err != nil { + return err + } + initCallbacks.Do(func() { ctlHandlerCallback = windows.NewCallback(ctlHandler) serviceMainCallback = windows.NewCallback(serviceMain) }) - theService.name = name + theService.namePointer = namePointer theService.handler = handler theService.c = make(chan ctlEvent) t := []windows.SERVICE_TABLE_ENTRY{ - {ServiceName: windows.StringToUTF16Ptr(theService.name), ServiceProc: serviceMainCallback}, + {ServiceName: namePointer, ServiceProc: serviceMainCallback}, {ServiceName: nil, ServiceProc: 0}, } return windows.StartServiceCtrlDispatcher(&t[0]) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 9d138de5..ad67df2f 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1074,6 +1074,7 @@ const ( IP_ADD_MEMBERSHIP = 0xc IP_DROP_MEMBERSHIP = 0xd IP_PKTINFO = 0x13 + IP_MTU_DISCOVER = 0x47 IPV6_V6ONLY = 0x1b IPV6_UNICAST_HOPS = 0x4 @@ -1083,6 +1084,7 @@ const ( IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd IPV6_PKTINFO = 0x13 + IPV6_MTU_DISCOVER = 0x47 MSG_OOB = 0x1 MSG_PEEK = 0x2 @@ -1132,6 +1134,15 @@ const ( WSASYS_STATUS_LEN = 128 ) +// enum PMTUD_STATE from ws2ipdef.h +const ( + IP_PMTUDISC_NOT_SET = 0 + IP_PMTUDISC_DO = 1 + IP_PMTUDISC_DONT = 2 + IP_PMTUDISC_PROBE = 3 + IP_PMTUDISC_MAX = 4 +) + type WSABuf struct { Len uint32 Buf *byte @@ -1146,6 +1157,22 @@ type WSAMsg struct { Flags uint32 } +type WSACMSGHDR struct { + Len uintptr + Level int32 + Type int32 +} + +type IN_PKTINFO struct { + Addr [4]byte + Ifindex uint32 +} + +type IN6_PKTINFO struct { + Addr [16]byte + Ifindex uint32 +} + // Flags for WSASocket const ( WSA_FLAG_OVERLAPPED = 0x01 diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index f636667f..14f89470 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -44,6 +44,8 @@ type Terminal struct { // bytes, as an index into |line|). If it returns ok=false, the key // press is processed normally. Otherwise it returns a replacement line // and the new cursor position. + // + // This will be disabled during ReadPassword. AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) // Escape contains a pointer to the escape codes for this terminal. @@ -692,6 +694,8 @@ func (t *Terminal) Write(buf []byte) (n int, err error) { // ReadPassword temporarily changes the prompt and reads a password, without // echo, from the terminal. +// +// The AutoCompleteCallback is disabled during this call. 
func (t *Terminal) ReadPassword(prompt string) (line string, err error) { t.lock.Lock() defer t.lock.Unlock() @@ -699,6 +703,11 @@ func (t *Terminal) ReadPassword(prompt string) (line string, err error) { oldPrompt := t.prompt t.prompt = []rune(prompt) t.echo = false + oldAutoCompleteCallback := t.AutoCompleteCallback + t.AutoCompleteCallback = nil + defer func() { + t.AutoCompleteCallback = oldAutoCompleteCallback + }() line, err = t.readLine() diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go index 4d57222e..053336e2 100644 --- a/vendor/golang.org/x/text/language/parse.go +++ b/vendor/golang.org/x/text/language/parse.go @@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) { if changed { tt.RemakeString() } - return makeTag(tt), err + return makeTag(tt), nil } // Compose creates a Tag from individual parts, which may be of type Tag, Base, diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 00000000..47a9a541 --- /dev/null +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,266 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. +func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + return ParseProfilesFromReader(pf) +} + +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. 
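The terminal.go change above saves AutoCompleteCallback, nils it for the duration of the password read, and restores it on return, so a completion hook installed for ordinary input never observes password keystrokes. A small usage sketch under that assumption (prompt strings and the toy completion are illustrative; real callers usually also switch the tty into raw mode with term.MakeRaw):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/term"
    )

    func main() {
        t := term.NewTerminal(os.Stdin, "> ")
        t.AutoCompleteCallback = func(line string, pos int, key rune) (string, int, bool) {
            if key == '\t' {
                return line + "!", pos + 1, true // toy completion
            }
            return "", 0, false // fall back to normal key handling
        }

        // While ReadPassword runs, the callback above is temporarily nil,
        // so the secret never reaches it; afterwards it is restored.
        secret, err := t.ReadPassword("password: ")
        if err != nil {
            panic(err)
        }
        fmt.Println("read", len(secret), "bytes")
    }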
+func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + fn, b, err := parseLine(line) + if err != nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) + } + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, b) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + // Merge samples from the same location. + j := 1 + for i := 1; i < len(p.Blocks); i++ { + b := p.Blocks[i] + last := p.Blocks[j-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) + } + if mode == "set" { + p.Blocks[j-1].Count |= b.Count + } else { + p.Blocks[j-1].Count += b.Count + } + continue + } + p.Blocks[j] = b + j++ + } + p.Blocks = p.Blocks[:j] + } + // Generate a sorted slice. + profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +// parseLine parses a line from a coverage file. +// It is equivalent to the regex +// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$ +// +// However, it is much faster: https://golang.org/cl/179377 +func parseLine(l string) (fileName string, block ProfileBlock, err error) { + end := len(l) + + b := ProfileBlock{} + b.Count, end, err = seekBack(l, ' ', end, "Count") + if err != nil { + return "", b, err + } + b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt") + if err != nil { + return "", b, err + } + b.EndCol, end, err = seekBack(l, '.', end, "EndCol") + if err != nil { + return "", b, err + } + b.EndLine, end, err = seekBack(l, ',', end, "EndLine") + if err != nil { + return "", b, err + } + b.StartCol, end, err = seekBack(l, '.', end, "StartCol") + if err != nil { + return "", b, err + } + b.StartLine, end, err = seekBack(l, ':', end, "StartLine") + if err != nil { + return "", b, err + } + fn := l[0:end] + if fn == "" { + return "", b, errors.New("a FileName cannot be blank") + } + return fn, b, nil +} + +// seekBack searches backwards from end to find sep in l, then returns the +// value between sep and end as an integer. +// If seekBack fails, the returned error will reference what. +func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) { + // Since we're seeking backwards and we know only ASCII is legal for these values, + // we can ignore the possibility of non-ASCII characters. 
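The newly vendored cover package parses "go test -coverprofile" output, and ParseProfilesFromReader accepts any io.Reader, so a profile can be decoded straight from memory using the line format documented above. A quick editorial sketch (the sample profile text is invented):

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/tools/cover"
    )

    func main() {
        // Two blocks of an invented profile in the format described above:
        // name.go:line.column,line.column numberOfStatements count
        sample := "mode: count\n" +
            "example.com/pkg/file.go:10.2,12.16 3 7\n" +
            "example.com/pkg/file.go:15.2,15.10 1 0\n"

        profiles, err := cover.ParseProfilesFromReader(strings.NewReader(sample))
        if err != nil {
            panic(err)
        }
        for _, p := range profiles {
            for _, b := range p.Blocks {
                fmt.Printf("%s:%d.%d-%d.%d stmts=%d count=%d\n",
                    p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count)
            }
        }
    }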
+ for start := end - 1; start >= 0; start-- { + if l[start] == sep { + i, err := strconv.Atoi(l[start+1 : end]) + if err != nil { + return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err) + } + if i < 0 { + return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i) + } + return i, start, nil + } + } + return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what) +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. + Index int // Order in input file. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + index := 0 + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count, Index: index} + index++ + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? + for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + // Boundaries at the same offset should be ordered according to + // their original position. + return b[i].Index < b[j].Index + } + return b[i].Offset < b[j].Offset +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 2c4c4e23..6e34df46 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // Does augmented child strictly contain [start, end)? 
if augPos <= start && end <= augEnd { - _, isToken := child.(tokenNode) - return isToken || visit(child) + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. + // Add it back here for TypeParams, Params, Results, + // all FieldLists). But we don't add it back for the "func" token + // even though it is is the tree at FuncDecl.Type.Func. + if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) } // Does [start, end) overlap multiple children? @@ -313,6 +326,8 @@ func childrenOf(n ast.Node) []ast.Node { // // As a workaround, we inline the case for FuncType // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. // children = nil // discard ast.Walk(FuncDecl) info subtrees children = append(children, tok(n.Type.Func, len("func"))) @@ -632,3 +647,8 @@ func NodeDescription(n ast.Node) string { } panic(fmt.Sprintf("unexpected node type: %T", n)) } + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 18d1adb0..5e5601aa 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -9,6 +9,7 @@ import ( "fmt" "go/ast" "go/token" + "slices" "strconv" "strings" ) @@ -186,7 +187,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() first.Specs = append(first.Specs, spec) } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) i-- } @@ -344,7 +345,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r } // UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } spec := importSpec(f, path) if spec == nil { return diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 58934f76..5c8dbbb7 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -183,7 +183,7 @@ type application struct { func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() { n = nil } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index 919d5305..c820b208 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -7,12 +7,7 @@ package astutil import "go/ast" // Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} +// Deprecated: use [ast.Unparen]. 
+// +//go:fix inline +func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 1fc1de0b..1da4a361 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -10,6 +10,7 @@ // builds a list of push/pop events and their node type. Subsequent // method calls that request a traversal scan this list, rather than walk // the AST, and perform type filtering using efficient bit sets. +// This representation is sometimes called a "balanced parenthesis tree." // // Experiments suggest the inspector's traversals are about 2.5x faster // than ast.Inspect, but it may take around 5 traversals for this @@ -36,6 +37,9 @@ package inspector import ( "go/ast" + _ "unsafe" + + "golang.org/x/tools/internal/astutil/edge" ) // An Inspector provides methods for inspecting @@ -44,6 +48,25 @@ type Inspector struct { events []event } +//go:linkname events +func events(in *Inspector) []event { return in.events } + +//go:linkname packEdgeKindAndIndex +func packEdgeKindAndIndex(ek edge.Kind, index int) int32 { + return int32(uint32(index+1)<<7 | uint32(ek)) +} + +// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within +// an []ast.Node slice) from the parent field of a pop event. +// +//go:linkname unpackEdgeKindAndIndex +func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) { + // The "parent" field of a pop node holds the + // edge Kind in the lower 7 bits and the index+1 + // in the upper 25. + return edge.Kind(x & 0x7f), int(x>>7) - 1 +} + // New returns an Inspector for the specified syntax trees. func New(files []*ast.File) *Inspector { return &Inspector{traverse(files)} @@ -52,9 +75,10 @@ func New(files []*ast.File) *Inspector { // An event represents a push or a pop // of an ast.Node during a traversal. type event struct { - node ast.Node - typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events - index int // index of corresponding push or pop event + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int32 // index of corresponding push or pop event + parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only) } // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). @@ -73,8 +97,17 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // check, Preorder is almost twice as fast as Nodes. The two // features seem to contribute similar slowdowns (~1.4x each). + // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. + // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + mask := maskOf(types) - for i := 0; i < len(in.events); { + for i := int32(0); i < int32(len(in.events)); { ev := in.events[i] if ev.index > i { // push @@ -104,7 +137,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // matches an element of the types slice. 
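The inspector described above records a flattened list of push/pop events per node type, so repeated traversals filter with a precomputed type mask instead of re-walking the AST. A minimal, hedged sketch of the public API as it existed before this change and still does (the parsed source string is a throwaway example):

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"

        "golang.org/x/tools/go/ast/inspector"
    )

    func main() {
        fset := token.NewFileSet()
        src := "package x; func f() int { return g(1) + g(2) }; func g(i int) int { return i }"
        f, err := parser.ParseFile(fset, "x.go", src, 0)
        if err != nil {
            panic(err)
        }

        in := inspector.New([]*ast.File{f})

        // Only *ast.CallExpr push events reach the callback; subtrees whose
        // accumulated type mask cannot contain a call are skipped wholesale.
        in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
            call := n.(*ast.CallExpr)
            fmt.Println("call at", fset.Position(call.Lparen))
        })
    }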
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { mask := maskOf(types) - for i := 0; i < len(in.events); { + for i := int32(0); i < int32(len(in.events)); { ev := in.events[i] if ev.index > i { // push @@ -138,7 +171,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node - for i := 0; i < len(in.events); { + for i := int32(0); i < int32(len(in.events)); { ev := in.events[i] if ev.index > i { // push @@ -171,50 +204,83 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s // traverse builds the table of events representing a traversal. func traverse(files []*ast.File) []event { // Preallocate approximate number of events - // based on source file extent. + // based on source file extent of the declarations. + // (We use End-Pos not FileStart-FileEnd to neglect + // the effect of long doc comments.) // This makes traverse faster by 4x (!). var extent int for _, f := range files { extent += int(f.End() - f.Pos()) } // This estimate is based on the net/http package. - capacity := extent * 33 / 100 - if capacity > 1e6 { - capacity = 1e6 // impose some reasonable maximum + capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M) + + v := &visitor{ + events: make([]event, 0, capacity), + stack: []item{{index: -1}}, // include an extra event so file nodes have a parent } - events := make([]event, 0, capacity) - - var stack []event - stack = append(stack, event{}) // include an extra event so file nodes have a parent - for _, f := range files { - ast.Inspect(f, func(n ast.Node) bool { - if n != nil { - // push - ev := event{ - node: n, - typ: 0, // temporarily used to accumulate type bits of subtree - index: len(events), // push event temporarily holds own index - } - stack = append(stack, ev) - events = append(events, ev) - } else { - // pop - top := len(stack) - 1 - ev := stack[top] - typ := typeOf(ev.node) - push := ev.index - parent := top - 1 - - events[push].typ = typ // set type of push - stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. - events[push].index = len(events) // make push refer to pop - - stack = stack[:top] - events = append(events, ev) - } - return true - }) + for _, file := range files { + walk(v, edge.Invalid, -1, file) } - - return events + return v.events +} + +type visitor struct { + events []event + stack []item +} + +type item struct { + index int32 // index of current node's push event + parentIndex int32 // index of parent node's push event + typAccum uint64 // accumulated type bits of current node's descendents + edgeKindAndIndex int32 // edge.Kind and index, bit packed +} + +func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) { + var ( + index = int32(len(v.events)) + parentIndex = v.stack[len(v.stack)-1].index + ) + v.events = append(v.events, event{ + node: node, + parent: parentIndex, + typ: typeOf(node), + index: 0, // (pop index is set later by visitor.pop) + }) + v.stack = append(v.stack, item{ + index: index, + parentIndex: parentIndex, + edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex), + }) + + // 2B nodes ought to be enough for anyone! + if int32(len(v.events)) < 0 { + panic("event index exceeded int32") + } + + // 32M elements in an []ast.Node ought to be enough for anyone! 
+ if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex { + panic("Node slice index exceeded uint25") + } +} + +func (v *visitor) pop(node ast.Node) { + top := len(v.stack) - 1 + current := v.stack[top] + + push := &v.events[current.index] + parent := &v.stack[top-1] + + push.index = int32(len(v.events)) // make push event refer to pop + parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent + + v.stack = v.stack[:top] + + v.events = append(v.events, event{ + node: node, + typ: current.typAccum, + index: current.index, + parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex] + }) } diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 00000000..c576dc70 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. + + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. 
+ i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 2a872f89..97784484 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,6 +12,8 @@ package inspector import ( "go/ast" "math" + + _ "unsafe" ) const ( @@ -215,8 +217,9 @@ func typeOf(n ast.Node) uint64 { return 0 } +//go:linkname maskOf func maskOf(nodes []ast.Node) uint64 { - if nodes == nil { + if len(nodes) == 0 { return math.MaxUint64 // match all node types } var mask uint64 diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go new file mode 100644 index 00000000..5a42174a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go @@ -0,0 +1,341 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file is a fork of ast.Inspect to reduce unnecessary dynamic +// calls and to gather edge information. +// +// Consistency with the original is ensured by TestInspectAllNodes. + +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/internal/astutil/edge" +) + +func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) { + for i, node := range list { + walk(v, ek, i, node) + } +} + +func walk(v *visitor, ek edge.Kind, index int, node ast.Node) { + v.push(ek, index, node) + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in ast.go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + walkList(v, edge.CommentGroup_List, n.List) + + case *ast.Field: + if n.Doc != nil { + walk(v, edge.Field_Doc, -1, n.Doc) + } + walkList(v, edge.Field_Names, n.Names) + if n.Type != nil { + walk(v, edge.Field_Type, -1, n.Type) + } + if n.Tag != nil { + walk(v, edge.Field_Tag, -1, n.Tag) + } + if n.Comment != nil { + walk(v, edge.Field_Comment, -1, n.Comment) + } + + case *ast.FieldList: + walkList(v, edge.FieldList_List, n.List) + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + if n.Elt != nil { + walk(v, edge.Ellipsis_Elt, -1, n.Elt) + } + + case *ast.FuncLit: + walk(v, edge.FuncLit_Type, -1, n.Type) + walk(v, edge.FuncLit_Body, -1, n.Body) + + case *ast.CompositeLit: + if n.Type != nil { + walk(v, edge.CompositeLit_Type, -1, n.Type) + } + walkList(v, edge.CompositeLit_Elts, n.Elts) + + case *ast.ParenExpr: + walk(v, edge.ParenExpr_X, -1, n.X) + + case *ast.SelectorExpr: + walk(v, edge.SelectorExpr_X, -1, n.X) + walk(v, edge.SelectorExpr_Sel, -1, n.Sel) + + case *ast.IndexExpr: + walk(v, edge.IndexExpr_X, -1, n.X) + walk(v, edge.IndexExpr_Index, -1, n.Index) + + case *ast.IndexListExpr: + walk(v, edge.IndexListExpr_X, -1, n.X) + walkList(v, edge.IndexListExpr_Indices, n.Indices) + + case *ast.SliceExpr: + walk(v, edge.SliceExpr_X, -1, n.X) + if n.Low != nil { + walk(v, edge.SliceExpr_Low, -1, n.Low) + } + if n.High != nil { + walk(v, edge.SliceExpr_High, -1, n.High) + } + if n.Max != nil { + walk(v, edge.SliceExpr_Max, -1, n.Max) + } + + case *ast.TypeAssertExpr: + walk(v, edge.TypeAssertExpr_X, -1, n.X) + if n.Type != nil { + walk(v, edge.TypeAssertExpr_Type, -1, n.Type) + } + + case *ast.CallExpr: + walk(v, edge.CallExpr_Fun, -1, n.Fun) + walkList(v, 
edge.CallExpr_Args, n.Args) + + case *ast.StarExpr: + walk(v, edge.StarExpr_X, -1, n.X) + + case *ast.UnaryExpr: + walk(v, edge.UnaryExpr_X, -1, n.X) + + case *ast.BinaryExpr: + walk(v, edge.BinaryExpr_X, -1, n.X) + walk(v, edge.BinaryExpr_Y, -1, n.Y) + + case *ast.KeyValueExpr: + walk(v, edge.KeyValueExpr_Key, -1, n.Key) + walk(v, edge.KeyValueExpr_Value, -1, n.Value) + + // Types + case *ast.ArrayType: + if n.Len != nil { + walk(v, edge.ArrayType_Len, -1, n.Len) + } + walk(v, edge.ArrayType_Elt, -1, n.Elt) + + case *ast.StructType: + walk(v, edge.StructType_Fields, -1, n.Fields) + + case *ast.FuncType: + if n.TypeParams != nil { + walk(v, edge.FuncType_TypeParams, -1, n.TypeParams) + } + if n.Params != nil { + walk(v, edge.FuncType_Params, -1, n.Params) + } + if n.Results != nil { + walk(v, edge.FuncType_Results, -1, n.Results) + } + + case *ast.InterfaceType: + walk(v, edge.InterfaceType_Methods, -1, n.Methods) + + case *ast.MapType: + walk(v, edge.MapType_Key, -1, n.Key) + walk(v, edge.MapType_Value, -1, n.Value) + + case *ast.ChanType: + walk(v, edge.ChanType_Value, -1, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + walk(v, edge.DeclStmt_Decl, -1, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + walk(v, edge.LabeledStmt_Label, -1, n.Label) + walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt) + + case *ast.ExprStmt: + walk(v, edge.ExprStmt_X, -1, n.X) + + case *ast.SendStmt: + walk(v, edge.SendStmt_Chan, -1, n.Chan) + walk(v, edge.SendStmt_Value, -1, n.Value) + + case *ast.IncDecStmt: + walk(v, edge.IncDecStmt_X, -1, n.X) + + case *ast.AssignStmt: + walkList(v, edge.AssignStmt_Lhs, n.Lhs) + walkList(v, edge.AssignStmt_Rhs, n.Rhs) + + case *ast.GoStmt: + walk(v, edge.GoStmt_Call, -1, n.Call) + + case *ast.DeferStmt: + walk(v, edge.DeferStmt_Call, -1, n.Call) + + case *ast.ReturnStmt: + walkList(v, edge.ReturnStmt_Results, n.Results) + + case *ast.BranchStmt: + if n.Label != nil { + walk(v, edge.BranchStmt_Label, -1, n.Label) + } + + case *ast.BlockStmt: + walkList(v, edge.BlockStmt_List, n.List) + + case *ast.IfStmt: + if n.Init != nil { + walk(v, edge.IfStmt_Init, -1, n.Init) + } + walk(v, edge.IfStmt_Cond, -1, n.Cond) + walk(v, edge.IfStmt_Body, -1, n.Body) + if n.Else != nil { + walk(v, edge.IfStmt_Else, -1, n.Else) + } + + case *ast.CaseClause: + walkList(v, edge.CaseClause_List, n.List) + walkList(v, edge.CaseClause_Body, n.Body) + + case *ast.SwitchStmt: + if n.Init != nil { + walk(v, edge.SwitchStmt_Init, -1, n.Init) + } + if n.Tag != nil { + walk(v, edge.SwitchStmt_Tag, -1, n.Tag) + } + walk(v, edge.SwitchStmt_Body, -1, n.Body) + + case *ast.TypeSwitchStmt: + if n.Init != nil { + walk(v, edge.TypeSwitchStmt_Init, -1, n.Init) + } + walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign) + walk(v, edge.TypeSwitchStmt_Body, -1, n.Body) + + case *ast.CommClause: + if n.Comm != nil { + walk(v, edge.CommClause_Comm, -1, n.Comm) + } + walkList(v, edge.CommClause_Body, n.Body) + + case *ast.SelectStmt: + walk(v, edge.SelectStmt_Body, -1, n.Body) + + case *ast.ForStmt: + if n.Init != nil { + walk(v, edge.ForStmt_Init, -1, n.Init) + } + if n.Cond != nil { + walk(v, edge.ForStmt_Cond, -1, n.Cond) + } + if n.Post != nil { + walk(v, edge.ForStmt_Post, -1, n.Post) + } + walk(v, edge.ForStmt_Body, -1, n.Body) + + case *ast.RangeStmt: + if n.Key != nil { + walk(v, edge.RangeStmt_Key, -1, n.Key) + } + if n.Value != nil { + walk(v, edge.RangeStmt_Value, -1, n.Value) + } + walk(v, edge.RangeStmt_X, -1, n.X) + walk(v, 
edge.RangeStmt_Body, -1, n.Body) + + // Declarations + case *ast.ImportSpec: + if n.Doc != nil { + walk(v, edge.ImportSpec_Doc, -1, n.Doc) + } + if n.Name != nil { + walk(v, edge.ImportSpec_Name, -1, n.Name) + } + walk(v, edge.ImportSpec_Path, -1, n.Path) + if n.Comment != nil { + walk(v, edge.ImportSpec_Comment, -1, n.Comment) + } + + case *ast.ValueSpec: + if n.Doc != nil { + walk(v, edge.ValueSpec_Doc, -1, n.Doc) + } + walkList(v, edge.ValueSpec_Names, n.Names) + if n.Type != nil { + walk(v, edge.ValueSpec_Type, -1, n.Type) + } + walkList(v, edge.ValueSpec_Values, n.Values) + if n.Comment != nil { + walk(v, edge.ValueSpec_Comment, -1, n.Comment) + } + + case *ast.TypeSpec: + if n.Doc != nil { + walk(v, edge.TypeSpec_Doc, -1, n.Doc) + } + walk(v, edge.TypeSpec_Name, -1, n.Name) + if n.TypeParams != nil { + walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams) + } + walk(v, edge.TypeSpec_Type, -1, n.Type) + if n.Comment != nil { + walk(v, edge.TypeSpec_Comment, -1, n.Comment) + } + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + if n.Doc != nil { + walk(v, edge.GenDecl_Doc, -1, n.Doc) + } + walkList(v, edge.GenDecl_Specs, n.Specs) + + case *ast.FuncDecl: + if n.Doc != nil { + walk(v, edge.FuncDecl_Doc, -1, n.Doc) + } + if n.Recv != nil { + walk(v, edge.FuncDecl_Recv, -1, n.Recv) + } + walk(v, edge.FuncDecl_Name, -1, n.Name) + walk(v, edge.FuncDecl_Type, -1, n.Type) + if n.Body != nil { + walk(v, edge.FuncDecl_Body, -1, n.Body) + } + + case *ast.File: + if n.Doc != nil { + walk(v, edge.File_Doc, -1, n.Doc) + } + walk(v, edge.File_Name, -1, n.Name) + walkList(v, edge.File_Decls, n.Decls) + // don't walk n.Comments - they have been + // visited already through the individual + // nodes + + default: + // (includes *ast.Package) + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.pop(node) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 137cc8df..7b90bc92 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -2,22 +2,64 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. +// Package gcexportdata provides functions for reading and writing +// export data, which is a serialized description of the API of a Go +// package including the names, kinds, types, and locations of all +// exported declarations. // -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) 
-package gcexportdata // import "golang.org/x/tools/go/gcexportdata" +// The standard Go compiler (cmd/compile) writes an export data file +// for each package it compiles, which it later reads when compiling +// packages that import the earlier one. The compiler must thus +// contain logic to both write and read export data. +// (See the "Export" section in the cmd/compile/README file.) +// +// The [Read] function in this package can read files produced by the +// compiler, producing [go/types] data structures. As a matter of +// policy, Read supports export data files produced by only the last +// two Go releases plus tip; see https://go.dev/issue/68898. The +// export data files produced by the compiler contain additional +// details related to generics, inlining, and other optimizations that +// cannot be decoded by the [Read] function. +// +// In files written by the compiler, the export data is not at the +// start of the file. Before calling Read, use [NewReader] to locate +// the desired portion of the file. +// +// The [Write] function in this package encodes the exported API of a +// Go package ([types.Package]) as a file. Such files can be later +// decoded by Read, but cannot be consumed by the compiler. +// +// # Future changes +// +// Although Read supports the formats written by both Write and the +// compiler, the two are quite different, and there is an open +// proposal (https://go.dev/issue/69491) to separate these APIs. +// +// Under that proposal, this package would ultimately provide only the +// Read operation for compiler export data, which must be defined in +// this module (golang.org/x/tools), not in the standard library, to +// avoid version skew for developer tools that need to read compiler +// export data both before and after a Go release, such as from Go +// 1.23 to Go 1.24. Because this package lives in the tools module, +// clients can update their version of the module some time before the +// Go 1.24 release and rebuild and redeploy their tools, which will +// then be able to consume both Go 1.23 and Go 1.24 export data files, +// so they will work before and after the Go update. (See discussion +// at https://go.dev/issue/15651.) +// +// The operations to import and export [go/types] data structures +// would be defined in the go/types package as Import and Export. +// [Write] would (eventually) delegate to Export, +// and [Read], when it detects a file produced by Export, +// would delegate to Import. +// +// # Deprecations +// +// The [NewImporter] and [Find] functions are deprecated and should +// not be used in new code. The [WriteBundle] and [ReadBundle] +// functions are experimental, and there is an open proposal to +// deprecate them (https://go.dev/issue/69573). +package gcexportdata import ( "bufio" @@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) - _, size, err := gcimporter.FindExportData(buf) + size, err := gcimporter.FindExportData(buf) if err != nil { return nil, err } - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. 
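The rewritten package documentation above positions Read as the supported decoder for compiler export data from the last two Go releases plus tip, with NewReader used first to locate the export payload inside the compiler's archive. A hedged sketch of that sequence; the archive path is hypothetical, and in practice comes from the go command as noted in the comment:

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
        "os"

        "golang.org/x/tools/go/gcexportdata"
    )

    func main() {
        // Hypothetical path to compiler export data; in practice it is
        // reported by `go list -export -f '{{.Export}}' <pkg>`.
        f, err := os.Open("/path/to/fmt.a")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        r, err := gcexportdata.NewReader(f) // locate the export payload in the archive
        if err != nil {
            panic(err)
        }

        fset := token.NewFileSet()
        imports := make(map[string]*types.Package)
        pkg, err := gcexportdata.Read(r, fset, imports, "fmt")
        if err != nil {
            panic(err)
        }
        fmt.Println(pkg.Path(), "exports", pkg.Scope().Len(), "objects")
    }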
- return buf, nil - } + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil } // readAll works the same way as io.ReadAll, but avoids allocations and copies @@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) { // Read reads export data from in, decodes it, and returns type // information for the package. // +// Read is capable of reading export data produced by [Write] at the +// same source code version, or by the last two Go releases (plus tip) +// of the standard Go compiler. Reading files from older compilers may +// produce an error. +// // The package path (effectively its linker symbol prefix) is // specified by path, since unlike the package name, this information // may not be recorded in the export data. @@ -128,22 +169,31 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary, produced by cmd/compile till go1.10 return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed, produced by cmd/compile till go1.19, + // and also by [Write]. + // + // If proposal #69491 is accepted, go/types + // serialization will be implemented by + // types.Export, to which Write would eventually + // delegate (explicitly dropping any pretence at + // inter-version Write-Read compatibility). + // This [Read] function would delegate to types.Import + // when it detects that the file was produced by Export. _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified, produced by cmd/compile since go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err default: - l := len(data) - if l > 10 { - l = 10 - } + l := min(len(data), 10) return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) } } diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go deleted file mode 100644 index c6e7c0d4..00000000 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesdriver fetches type sizes for go/packages and go/analysis. -package packagesdriver - -import ( - "context" - "fmt" - "strings" - - "golang.org/x/tools/internal/gocommand" -) - -// TODO(adonovan): move back into go/packages. -func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { - inv.Verb = "list" - inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} - stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) - var goarch, compiler string - if rawErr != nil { - rawErrMsg := rawErr.Error() - if strings.Contains(rawErrMsg, "cannot find main module") || - strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. - // All bets are off. Get GOARCH and guess compiler is gc. 
- // TODO(matloob): Is this a problem in practice? - inv.Verb = "env" - inv.Args = []string{"GOARCH"} - envout, enverr := gocmdRunner.Run(ctx, inv) - if enverr != nil { - return "", "", enverr - } - goarch = strings.TrimSpace(envout.String()) - compiler = "gc" - } else if friendlyErr != nil { - return "", "", friendlyErr - } else { - // This should be unreachable, but be defensive - // in case RunRaw's error results are inconsistent. - return "", "", rawErr - } - } else { - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", - stdout.String(), stderr.String()) - } - goarch = fields[0] - compiler = fields[1] - } - return compiler, goarch, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3531ac8f..f1931d10 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. +causes Load to run in [LoadFiles] mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them +uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. # The driver protocol -[Load] may be used to load Go packages even in Go projects that use +Load may be used to load Go packages even in Go projects that use alternative build systems, by installing an appropriate "driver" program for the build system and specifying its location in the GOPACKAGESDRIVER environment variable. @@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information is written to the driver's standard input. The driver must write a JSON-encoded [DriverResponse] message to its standard output. (This message differs from the JSON schema produced by 'go list'.) + +The value of the PWD environment variable seen by the driver process +is the preferred name of its working directory. (The working directory +may have other aliases due to symbolic links; see the comment on the +Dir field of [exec.Cmd] for related information.) +When the driver process emits in its response the name of a file +that is a descendant of this directory, it must use an absolute path +that has the value of PWD as a prefix, to ensure that the returned +filenames satisfy the original query. */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index c2b4b711..f37bc651 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -13,6 +13,7 @@ import ( "fmt" "os" "os/exec" + "slices" "strings" ) @@ -79,17 +80,17 @@ type DriverResponse struct { // driver is the type for functions that query the build system for the // packages named by the patterns. 
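The doc.go hunk above spells out the GOPACKAGESDRIVER protocol: the driver receives the patterns as arguments and a JSON DriverRequest on stdin, and must reply with a JSON DriverResponse whose file names are absolute paths rooted at the PWD it was given. The smallest conforming driver simply declines, which makes go/packages fall back to the go list driver; a hedged sketch, with the NotHandled field name taken from the DriverResponse type in this package:

    package main

    import (
        "encoding/json"
        "io"
        "os"
    )

    // A deliberately minimal GOPACKAGESDRIVER: it drains the JSON DriverRequest
    // from stdin and answers "not handled", which tells go/packages to fall
    // back to the ordinary go list driver.
    func main() {
        _, _ = io.ReadAll(os.Stdin)
        _ = json.NewEncoder(os.Stdout).Encode(map[string]any{"NotHandled": true})
    }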
-type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) +type driver func(cfg *Config, patterns []string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." +// the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. func findExternalDriver(cfg *Config) driver { const toolPrefix = "GOPACKAGESDRIVER=" tool := "" for _, env := range cfg.Env { - if val := strings.TrimPrefix(env, toolPrefix); val != env { + if val, ok := strings.CutPrefix(env, toolPrefix); ok { tool = val } } @@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*DriverResponse, error) { + return func(cfg *Config, patterns []string) (*DriverResponse, error) { req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, @@ -117,7 +118,7 @@ func findExternalDriver(cfg *Config) driver { buf := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd := exec.CommandContext(cfg.Context, tool, patterns...) cmd.Dir = cfg.Dir // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the @@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver { // command. // // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) - cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) + cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr @@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver { return &response, nil } } - -// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. -// TODO(adonovan): use go1.21 slices.Clip. -func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index d9be410a..0458b4f9 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -21,7 +21,6 @@ import ( "sync" "unicode" - "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" ) @@ -81,6 +80,12 @@ type golistState struct { cfg *Config ctx context.Context + runner *gocommand.Runner + + // overlay is the JSON file that encodes the Config.Overlay + // mapping, used by 'go list -overlay=...'. + overlay string + envOnce sync.Once goEnvError error goEnv map[string]string @@ -128,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { +// +// overlay is the JSON file that encodes the cfg.Overlay +// mapping, used by 'go list -overlay=...' +func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. 
parentCtx := cfg.Context if parentCtx == nil { @@ -143,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error cfg: cfg, ctx: ctx, vendorDirs: map[string]bool{}, + overlay: overlay, + runner: runner, } // Fill in response.Sizes asynchronously if necessary. - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { errCh := make(chan error) go func() { - compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -312,6 +322,7 @@ type jsonPackage struct { ImportPath string Dir string Name string + Target string Export string GoFiles []string CompiledGoFiles []string @@ -495,13 +506,15 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse pkg := &Package{ Name: p.Name, ID: p.ImportPath, + Dir: p.Dir, + Target: p.Target, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), EmbedFiles: absJoin(p.Dir, p.EmbedFiles), EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), - forTest: p.ForTest, + ForTest: p.ForTest, depsErrors: p.DepsErrors, Module: p.Module, } @@ -682,7 +695,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { // getGoVersion returns the effective minor version of the go command. func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner) }) return state.goVersion, state.goVersionError } @@ -752,7 +765,7 @@ func jsonFlag(cfg *Config, goVersion int) string { } } addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", "SwigFiles", "SwigCXXFiles", "SysoFiles") @@ -760,7 +773,7 @@ func jsonFlag(cfg *Config, goVersion int) string { addFields("TestGoFiles", "XTestGoFiles") } } - if cfg.Mode&NeedTypes != 0 { + if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, // even when -compiled isn't passed in. // TODO(#52435): Should we make the test ask for -compiled, or automatically @@ -785,7 +798,7 @@ func jsonFlag(cfg *Config, goVersion int) string { // Request Dir in the unlikely case Export is not absolute. 
addFields("Dir", "Export") } - if cfg.Mode&needInternalForTest != 0 { + if cfg.Mode&NeedForTest != 0 { addFields("ForTest") } if cfg.Mode&needInternalDepsErrors != 0 { @@ -800,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } + if cfg.Mode&NeedTarget != 0 { + addFields("Target") + } return "-json=" + strings.Join(fields, ",") } @@ -841,7 +857,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, - Overlay: cfg.goListOverlayFile, + Overlay: state.overlay, } } @@ -852,11 +868,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, inv := state.cfgInvocation() inv.Verb = verb inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + + stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { @@ -880,6 +893,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, friendlyErr } + // Return an error if 'go list' failed due to missing tools in + // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { + return nil, friendlyErr + } + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. // @@ -1024,3 +1043,44 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// getSizesForArgs queries 'go list' for the appropriate +// Compiler and GOARCH arguments to pass to [types.SizesFor]. +func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. 
+ return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5c080d21..69eec9f4 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,49 +9,48 @@ import ( "strings" ) -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedForTest, "NeedForTest"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, + {NeedTarget, "NeedTarget"}, } -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { +func (mode LoadMode) String() string { + if mode == 0 { return "LoadMode(0)" } var out []string - for i, x := range allModes { - if x > m { - break - } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x + // named bits + for _, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) } } - if m != 0 { - out = append(out, "Unknown") + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) + } + out = append(out, fmt.Sprintf("%#x", int(mode))) } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) + if len(out) == 1 { + return out[0] + } + return "(" + strings.Join(out, "|") + ")" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 34306ddd..6665a04c 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,13 +16,13 @@ import ( "go/scanner" "go/token" "go/types" - "io" "log" "os" "path/filepath" "runtime" "strings" "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -31,7 +31,6 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -44,20 +43,33 @@ import ( // ID and Errors (if present) will always be filled. // [Load] may return more information than requested. // +// The Mode flag is a union of several bits named NeedName, +// NeedFiles, and so on, each of which determines whether +// a given field of Package (Name, Files, etc) should be +// populated. 
+// +// For convenience, we provide named constants for the most +// common combinations of Need flags: +// +// [LoadFiles] lists of files in each package +// [LoadImports] ... plus imports +// [LoadTypes] ... plus type information +// [LoadSyntax] ... plus type-annotated syntax +// [LoadAllSyntax] ... for all dependencies +// // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/48226 -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://go.dev/issue/56633 +// - https://go.dev/issue/56677 +// - https://go.dev/issue/58726 +// - https://go.dev/issue/63517 type LoadMode int const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles and OtherFiles. + // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -76,10 +88,10 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax. + // NeedSyntax adds Syntax and Fset. NeedSyntax - // NeedTypesInfo adds TypesInfo. + // NeedTypesInfo adds TypesInfo and Fset. NeedTypesInfo // NeedTypesSizes adds TypesSizes. @@ -88,9 +100,10 @@ const ( // needInternalDepsErrors adds the internal deps errors field for use by gopls. needInternalDepsErrors - // needInternalForTest adds the internal forTest field. + // NeedForTest adds ForTest. + // // Tests must also be set on the context for this field to be populated. - needInternalForTest + NeedForTest // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. @@ -104,43 +117,39 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + + // NeedTarget adds Target. + NeedTarget + + // Be sure to update loadmode_string.go when adding new items! ) const ( - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadFiles loads the name and file names for the initial packages. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadImports loads the name, file names, and import mapping for the initial packages. LoadImports = LoadFiles | NeedImports - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadTypes loads exported type information for the initial packages. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadSyntax loads typed syntax for the initial packages. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. 
+ // + //go:fix inline NeedExportsFile = NeedExportFile ) // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. // -// Calls to Load do not modify this struct. -// -// TODO(adonovan): #67702: this is currently false: in fact, -// calls to [Load] do not modify the public fields of this struct, but -// may modify hidden fields, so concurrent calls to [Load] must not -// use the same Config. But perhaps we should reestablish the -// documented invariant. +// Calls to [Load] do not modify this struct. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -154,7 +163,7 @@ type Config struct { // If the user provides a logger, debug logging is enabled. // If the GOPACKAGESDEBUG environment variable is set to true, // but the logger is nil, default to log.Printf. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Dir is the directory in which to run the build system's query tool // that provides information about the packages. @@ -171,19 +180,10 @@ type Config struct { // Env []string - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -230,21 +230,24 @@ type Config struct { // drivers may vary in their level of support for overlays. Overlay map[string][]byte - // goListOverlayFile is the JSON file that encodes the Overlay - // mapping, used by 'go list -overlay=...' - goListOverlayFile string + // -- Hidden configuration fields only for use in x/tools -- + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string } // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. // // The [Config.Mode] field is a set of bits that determine what kinds // of information should be computed and returned. Modes that require // more information tend to be slower. See [LoadMode] for details // and important caveats. Its zero value is equivalent to -// NeedName | NeedFiles | NeedCompiledGoFiles. +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. // // Each call to Load returns a new set of [Package] instances. // The Packages and their Imports form a directed acyclic graph. @@ -261,7 +264,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. 
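
// Illustrative sketch, not part of this patch: a typical Load call using the
// Need bits documented above, including the newly exported NeedForTest and the
// new NeedTarget/Dir fields. The "./..." pattern and the choice of bits are
// assumptions for the example only.
package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports |
			packages.NeedForTest | packages.NeedTarget,
		Tests: true, // NeedForTest is only populated when Tests is set
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		fmt.Fprintln(os.Stderr, err) // failure to start the query, not per-package errors
		os.Exit(1)
	}
	if packages.PrintErrors(pkgs) > 0 { // per-package (and now per-module) errors
		os.Exit(1)
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.Dir, p.Target, p.ForTest)
	}
}
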
func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -324,21 +327,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // (fall through) + // not handled: fall through } // go list fallback - // + // Write overlays once, as there are many calls // to 'go list' (one per chunk plus others too). - overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) if err != nil { return nil, false, err } defer cleanupOverlay() - cfg.goListOverlayFile = overlay - response, err := callDriverOnChunks(goListDriver, cfg, chunks) + var runner gocommand.Runner // (shared across many 'go list' calls) + driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { + return goListDriver(cfg, &runner, overlayFile, patterns) + } + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } @@ -376,16 +382,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg) + return driver(cfg, nil) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { - i := i - chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) + responses[i], err = driver(cfg, chunk) if responses[i] != nil && responses[i].NotHandled { err = errNotHandled } @@ -435,6 +439,12 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string + // Dir is the directory associated with the package, if it exists. + // + // For packages listed by the go command, this is the directory containing + // the package files. + Dir string + // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -474,6 +484,10 @@ type Package struct { // information for the package as provided by the build system. ExportFile string + // Target is the absolute install path of the .a file, for libraries, + // and of the executable file, for binaries. + Target string + // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. Imports map[string]*Package @@ -522,8 +536,8 @@ type Package struct { // -- internal -- - // forTest is the package under test, if any. - forTest string + // ForTest is the package under test, if any. + ForTest string // depsErrors is the DepsErrors field from the go list response, if any. 
depsErrors []*packagesinternal.PackageError @@ -552,21 +566,17 @@ type ModuleError struct { } func init() { - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.SetModFile = func(config interface{}, value string) { + packagesinternal.SetModFile = func(config any, value string) { config.(*Config).modFile = value } - packagesinternal.SetModFlag = func(config interface{}, value string) { + packagesinternal.SetModFlag = func(config any, value string) { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) - packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -682,18 +692,19 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage + pkgs map[string]*loaderPackage // keyed by Package.ID Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -730,7 +741,7 @@ func newLoader(cfg *Config) *loader { if debug { ld.Config.Logf = log.Printf } else { - ld.Config.Logf = func(format string, args ...interface{}) {} + ld.Config.Logf = func(format string, args ...any) {} } } if ld.Config.Mode == 0 { @@ -739,9 +750,6 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } if ld.Context == nil { ld.Context = context.Background() } @@ -755,7 +763,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -764,6 +772,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. 
:( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } @@ -795,7 +804,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or itas a non- root and the user requested dependencies... needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -820,9 +829,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - + // Materialize the import graph if it is needed (NeedImports), + // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}). + var leaves []*loaderPackage // packages with no unfinished successors + if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { const ( white = 0 // new grey = 1 // in progress @@ -841,63 +851,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code. var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: + var visit func(from, lpkg *loaderPackage) bool + visit = func(from, lpkg *loaderPackage) bool { + if lpkg.color == grey { panic("internal error: grey node") } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) - } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) + if lpkg.color == white { + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) } - lpkg.importErrors[importPath] = importErr - continue + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(lpkg, imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package } - if visit(imp) { - lpkg.needsrc = true + // -- postorder -- + + // Complete type information is required for the + // immediate 
dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } } - lpkg.Imports[importPath] = imp.Package + + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + + // Add packages with no imports directly to the queue of leaves. + if len(lpkg.Imports) == 0 { + leaves = append(leaves, lpkg) + } + + stack = stack[:len(stack)-1] // pop + lpkg.color = black } - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true - } + // Add edge from predecessor. + if from != nil { + from.unfinishedSuccs.Add(+1) // incref + lpkg.preds = append(lpkg.preds, from) } - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - return lpkg.needsrc } // For each initial package, create its import DAG. for _, lpkg := range initial { - visit(lpkg) + visit(nil, lpkg) } } else { @@ -910,16 +933,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + + // We avoid using g.SetLimit to limit concurrency as + // it makes g.Go stop accepting work, which prevents + // workers from enqeuing, and thus finishing, and thus + // allowing the group to make progress: deadlock. + // + // Instead we use the ioLimit and cpuLimit semaphores. + g, _ := errgroup.WithContext(ld.Context) + + // enqueues adds a package to the type-checking queue. + // It must have no unfinished successors. + var enqueue func(*loaderPackage) + enqueue = func(lpkg *loaderPackage) { + g.Go(func() error { + // Parse and type-check. + ld.loadPackage(lpkg) + + // Notify each waiting predecessor, + // and enqueue it when it becomes a leaf. + for _, pred := range lpkg.preds { + if pred.unfinishedSuccs.Add(-1) == 0 { // decref + enqueue(pred) + } + } + + return nil + }) + } + + // Load leaves first, adding new packages + // to the queue as they become leaves. + for _, leaf := range leaves { + enqueue(leaf) + } + + if err := g.Wait(); err != nil { + return nil, err // cancelled } - wg.Wait() } // If the context is done, return its error and @@ -961,12 +1013,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { + ld.pkgs[i].Fset = nil + } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -981,31 +1035,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. 
-// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. +// loadPackage loads/parses/typechecks the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. +// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -1041,6 +1074,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data. if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1146,7 +1183,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { return } @@ -1157,16 +1194,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo != 0 { + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } } - versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1219,6 +1260,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // Type-checking is CPU intensive. + cpuLimit <- unit{} // acquire a token + defer func() { <-cpuLimit }() // release a token + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1283,8 +1328,11 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) +// the number of parallel I/O calls or CPU threads per process. 
+var ( + ioLimit = make(chan unit, 20) + cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) +) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1301,20 +1349,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { + // TODO(adonovan): Inefficient for large overlays. + // Do an exact name-based map lookup + // (for nonexistent files) followed by a + // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents + break } } var err error if src == nil { - ioLimit <- true // wait + ioLimit <- unit{} // acquire a token src, err = os.ReadFile(filename) - <-ioLimit // signal + <-ioLimit // release a token } if err != nil { v.err = err } else { + // Parsing is CPU intensive. + cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + <-cpuLimit // release a token } close(v.ready) @@ -1329,18 +1385,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { + var ( + n = len(filenames) + parsed = make([]*ast.File, n) + errors = make([]error, n) + ) + var g errgroup.Group + for i, filename := range filenames { + // This creates goroutines unnecessarily in the + // cache-hit case, but that case is uncommon. + g.Go(func() error { parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) + return nil + }) } - wg.Wait() + g.Wait() // Eliminate nils, preserving order. var o int @@ -1499,6 +1558,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. + loadMode |= NeedModule + } return loadMode } @@ -1507,4 +1570,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later +type unit struct{} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index a1dcc40b..df14ffd9 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int + errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } + + // Print pkg.Module.Error once if present. 
+ mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index a2386c34..16ed3c17 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,8 +63,8 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, +// - The TT operators are encoded as [EKPRUTrCa]; +// two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. // - The TO operators are encoded as [AFMO]; // three of these (At,Field,Method) require an integer operand, @@ -98,19 +98,21 @@ const ( opType = '.' // .Type() (Object) // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) ) // For is equivalent to new(Encoder).For(obj). @@ -226,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. 
return "", fmt.Errorf("no path for %v", obj) @@ -278,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { + if alias, ok := T.(*types.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { return Path(r), nil } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { - // generic named type - return Path(r), nil - } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { + return Path(r), nil } + + } else if tname.IsAlias() { + // legacy alias + if r := find(obj, T, path); r != nil { + return Path(r), nil + } + + } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { return Path(r), nil } } @@ -305,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + if r := find(obj, o.Type(), append(path, opType)); r != nil { return Path(r), nil } } @@ -313,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -325,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + if r := find(obj, m.Type(), append(path2, opType)); r != nil { return Path(r), nil } } @@ -440,43 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. +func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. +type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return f.find(types.Unalias(T), path) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
return nil case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + if r := f.find(T.Key(), append(path, opKey)); r != nil { return r } - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Signature: - if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { return r } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { return r } - return find(obj, T.Results(), append(path, opResults), seen) + if r := f.find(T.Params(), append(path, opParams)); r != nil { + return r + } + return f.find(T.Results(), append(path, opResults)) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == obj { + if fld == f.obj { return path2 // found field var } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + if r := f.find(fld.Type(), append(path2, opType)); r != nil { return r } } @@ -485,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == obj { + if v == f.obj { return path2 // found param/result var } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + if r := f.find(v.Type(), append(path2, opType)); r != nil { return r } } @@ -496,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) + if f.seenMethods[m] { + return nil + } path2 := appendOpArg(path, opMethod, i) - if m == obj { + if m == f.obj { return path2 // found interface method } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { + if f.seenTParamNames[name] { return nil } - if seen == nil { - seen = make(map[*types.TypeName]bool) + if name == f.obj { + return append(path, opObj) } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) + } + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { return r } return nil @@ -525,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { 
+ return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) - path2 := appendOpArg(path, opTypeParam, i) - if r := find(obj, tparam, path2, seen); r != nil { + path2 := appendOpArg(path, op, i) + if r := f.find(tparam, path2); r != nil { return r } } @@ -580,10 +619,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { code := suffix[0] suffix = suffix[1:] - // Codes [AFM] have an integer operand. + // Codes [AFMTr] have an integer operand. var index int switch code { - case opAt, opField, opMethod, opTypeParam: + case opAt, opField, opMethod, opTypeParam, opRecvTypeParam: rest := strings.TrimLeft(suffix, "0123456789") numerals := suffix[:len(suffix)-len(rest)] suffix = rest @@ -616,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -653,6 +692,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() + case opRhs: + if alias, ok := t.(*types.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { @@ -664,6 +713,17 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = tparams.At(index) + case opRecvTypeParam: + sig, ok := t.(*types.Signature) // Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + rtparams := sig.RecvTypeParams() + if n := rtparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = rtparams.At(index) + case opConstraint: tparam, ok := t.(*types.TypeParam) if !ok { @@ -725,6 +785,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } } + if obj == nil { + panic(p) // path does not end in an object-valued operator + } + if obj.Pkg() != pkg { return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 00000000..53b71339 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + _ "unsafe" // for linkname +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +// +// Note: for calls of instantiated functions and methods, Callee returns +// the corresponding generic function or method on the generic type. 
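
// Illustrative sketch, not part of this patch: resolving call targets with
// StaticCallee while walking a type-checked file. The tiny source snippet and
// the package name "p" are assumptions for the example.
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p

import "fmt"

func f() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// StaticCallee needs both Types and Uses to be populated.
	info := &types.Info{
		Types: make(map[ast.Expr]types.TypeAndValue),
		Uses:  make(map[*ast.Ident]types.Object),
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if fn := typeutil.StaticCallee(info, call); fn != nil {
				fmt.Println("static callee:", fn.FullName()) // fmt.Println
			}
		}
		return true
	})
}
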
+func Callee(info *types.Info, call *ast.CallExpr) types.Object { + obj := info.Uses[usedIdent(info, call.Fun)] + if obj == nil { + return nil + } + if _, ok := obj.(*types.TypeName); ok { + return nil + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + obj := info.Uses[usedIdent(info, call.Fun)] + fn, _ := obj.(*types.Func) + if fn == nil || interfaceMethod(fn) { + return nil + } + return fn +} + +// usedIdent is the implementation of [internal/typesinternal.UsedIdent]. +// It returns the identifier associated with e. +// See typesinternal.UsedIdent for a fuller description. +// This function should live in typesinternal, but cannot because it would +// create an import cycle. +// +//go:linkname usedIdent +func usedIdent(info *types.Info, e ast.Expr) *ast.Ident { + if info.Types == nil || info.Uses == nil { + panic("one of info.Types or info.Uses is nil; both must be populated") + } + // Look through type instantiation if necessary. + switch d := ast.Unparen(e).(type) { + case *ast.IndexExpr: + if info.Types[d.Index].IsType() { + e = d.X + } + case *ast.IndexListExpr: + e = d.X + } + + switch e := ast.Unparen(e).(type) { + // info.Uses always has the object we want, even for selector expressions. + // We don't need info.Selections. + // See go/types/recording.go:recordSelection. + case *ast.Ident: + return e + case *ast.SelectorExpr: + return e.Sel + } + return nil +} + +// interfaceMethod reports whether its argument is a method of an interface. +// This function should live in typesinternal, but cannot because it would create an import cycle. +// +//go:linkname interfaceMethod +func interfaceMethod(f *types.Func) bool { + recv := f.Signature().Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 00000000..b81ce0c3 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 00000000..b6d542c6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,475 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as [Map], +// a hash table that maps [types.Type] to any value. +package typeutil + +import ( + "bytes" + "fmt" + "go/types" + "hash/maphash" + "unsafe" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Read-only map operations ([Map.At], [Map.Len], and so on) may +// safely be called concurrently. +// +// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 +// and 69559, if the latter proposals for a generic hash-map type and +// a types.Hash function are accepted. +type Map struct { + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher has no effect. +// +// It is a relic of an optimization that is no longer profitable. Do +// not use [Hasher], [MakeHasher], or [SetHasher] in new code. +func (m *Map) SetHasher(Hasher) {} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + hash := hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. 
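
// Illustrative sketch, not part of this patch: Map keys on type identity, so
// two separately constructed but identical types share one entry. The key and
// value choices are assumptions for the example.
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // zero value is a valid empty map

	k1 := types.NewSlice(types.Typ[types.Int]) // []int
	k2 := types.NewSlice(types.Typ[types.Int]) // a distinct but identical []int

	m.Set(k1, "slice of int")
	fmt.Println(m.At(k2)) // "slice of int": identical keys hit the same entry
	fmt.Println(m.Len())  // 1
}
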
+func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +// -- Hasher -- + +// hash returns the hash of type t. +// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. +func hash(t types.Type) uint32 { + return theHasher.Hash(t) +} + +// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. +// Hashers are stateless, and all are equivalent. +type Hasher struct{} + +var theHasher Hasher + +// MakeHasher returns Hasher{}. +// Hashers are stateless; all are equivalent. +func MakeHasher() Hasher { return theHasher } + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + return hasher{inGenericSig: false}.hash(t) +} + +// hasher holds the state of a single Hash traversal: whether we are +// inside the signature of a generic function; this is used to +// optimize [hasher.hashTypeParam]. +type hasher struct{ inGenericSig bool } + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hash computes the hash of t. +func (h hasher) hash(t types.Type) uint32 { + // See Identical for rationale. 
+ switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + tparams := t.TypeParams() + if n := tparams.Len(); n > 0 { + h.inGenericSig = true // affects constraints, params, and results + + for i := range n { + tparam := tparams.At(i) + hash += 7 * h.hash(tparam.Constraint()) + } + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) + + case *types.Named: + hash := h.hashTypeName(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 3 * h.hash(tuple.At(i).Type()) + } + return hash +} + +func (h hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. + terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. + if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns the hash of a type parameter. +func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { + // Within the signature of a generic function, TypeParams are + // identical if they have the same index and constraint, so we + // hash them based on index. + // + // When we are outside a generic function, free TypeParams are + // identical iff they are the same object, so we can use a + // more discriminating hash consistent with object identity. 
+ // This optimization saves [Map] about 4% when hashing all the + // types.Info.Types in the forward closure of net/http. + if !h.inGenericSig { + // Optimization: outside a generic function signature, + // use a more discrimating hash consistent with object identity. + return h.hashTypeName(t.Obj()) + } + return 9173 + 3*uint32(t.Index()) +} + +var theSeed = maphash.MakeSeed() + +// hashTypeName hashes the pointer of tname. +func (hasher) hashTypeName(tname *types.TypeName) uint32 { + // Since types.Identical uses == to compare TypeNames, + // the Hash function uses maphash.Comparable. + // TODO(adonovan): or will, when it becomes available in go1.24. + // In the meantime we use the pointer's numeric value. + // + // hash := maphash.Comparable(theSeed, tname) + // + // (Another approach would be to hash the name and package + // path, and whether or not it is a package-level typename. It + // is rare for a package to define multiple local types with + // the same name.) + ptr := uintptr(unsafe.Pointer(tname)) + if unsafe.Sizeof(ptr) == 8 { + hash := uint64(ptr) + return uint32(hash ^ (hash >> 32)) + } else { + return uint32(ptr) + } +} + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. +// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. + switch t := t.(type) { + case *types.Alias: + return h.shallowHash(types.Unalias(t)) + + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow. + return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashTypeName(t.Obj()) + + case *types.TypeParam: + return h.hashTypeParam(t) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 00000000..f7666028 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 00000000..9dda6a25 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). 
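
// Illustrative sketch, not part of this patch: IntuitiveMethodSet on a concrete
// type reports methods declared on both T and *T. The type B and its methods
// are assumptions for the example.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p

type B int

func (B) Value()    {}
func (*B) Pointer() {}
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config // no Importer needed: the snippet has no imports
	pkg, err := conf.Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}

	var cache typeutil.MethodSetCache
	b := pkg.Scope().Lookup("B").Type()
	for _, sel := range typeutil.IntuitiveMethodSet(b, &cache) {
		fmt.Println(sel.Obj().Name()) // Pointer, Value
	}
}
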
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee..b9425f5a 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. +func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index c027b9f3..00000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. -type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. 
-func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index b3299548..7716a333 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,31 +11,51 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) } -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } +// TypeParams returns the type parameter list of the alias. +func TypeParams(alias *types.Alias) *types.TypeParamList { + if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { + return alias.TypeParams() // go1.23+ + } + return nil +} -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a +// SetTypeParams sets the type parameters of the alias type. +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { + if alias, ok := any(alias).(interface { + SetTypeParams(tparams []*types.TypeParam) + }); ok { + alias.SetTypeParams(tparams) // go1.23+ + } else if len(tparams) > 0 { + panic("cannot set type parameters of an Alias type in go1.22") + } +} + +// TypeArgs returns the type arguments used to instantiate the Alias type. +func TypeArgs(alias *types.Alias) *types.TypeList { + if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { + return alias.TypeArgs() // go1.23+ + } + return nil // empty (go1.22) +} + +// Origin returns the generic Alias type of which alias is an instance. +// If alias is not an instance of a generic alias, Origin returns alias. +func Origin(alias *types.Alias) *types.Alias { + if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { + return alias.Origin() // go1.23+ + } + return alias // not an instance of a generic alias (go1.22) } // Enabled reports whether [NewAlias] should create [types.Alias] types. @@ -56,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. 
fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go new file mode 100644 index 00000000..4f6ccfd6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go @@ -0,0 +1,295 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edge defines identifiers for each field of an ast.Node +// struct type that refers to another Node. +package edge + +import ( + "fmt" + "go/ast" + "reflect" +) + +// A Kind describes a field of an ast.Node struct. +type Kind uint8 + +// String returns a description of the edge kind. +func (k Kind) String() string { + if k == Invalid { + return "" + } + info := fieldInfos[k] + return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name) +} + +// NodeType returns the pointer-to-struct type of the ast.Node implementation. +func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType } + +// FieldName returns the name of the field. +func (k Kind) FieldName() string { return fieldInfos[k].name } + +// FieldType returns the declared type of the field. +func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType } + +// Get returns the direct child of n identified by (k, idx). +// n's type must match k.NodeType(). +// idx must be a valid slice index, or -1 for a non-slice. +func (k Kind) Get(n ast.Node, idx int) ast.Node { + if k.NodeType() != reflect.TypeOf(n) { + panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n)) + } + v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index) + if idx != -1 { + v = v.Index(idx) // asserts valid index + } else { + // (The type assertion below asserts that v is not a slice.) + } + return v.Interface().(ast.Node) // may be nil +} + +const ( + Invalid Kind = iota // for nodes at the root of the traversal + + // Kinds are sorted alphabetically. + // Numbering is not stable. 
+ // Each is named Type_Field, where Type is the + // ast.Node struct type and Field is the name of the field + + ArrayType_Elt + ArrayType_Len + AssignStmt_Lhs + AssignStmt_Rhs + BinaryExpr_X + BinaryExpr_Y + BlockStmt_List + BranchStmt_Label + CallExpr_Args + CallExpr_Fun + CaseClause_Body + CaseClause_List + ChanType_Value + CommClause_Body + CommClause_Comm + CommentGroup_List + CompositeLit_Elts + CompositeLit_Type + DeclStmt_Decl + DeferStmt_Call + Ellipsis_Elt + ExprStmt_X + FieldList_List + Field_Comment + Field_Doc + Field_Names + Field_Tag + Field_Type + File_Decls + File_Doc + File_Name + ForStmt_Body + ForStmt_Cond + ForStmt_Init + ForStmt_Post + FuncDecl_Body + FuncDecl_Doc + FuncDecl_Name + FuncDecl_Recv + FuncDecl_Type + FuncLit_Body + FuncLit_Type + FuncType_Params + FuncType_Results + FuncType_TypeParams + GenDecl_Doc + GenDecl_Specs + GoStmt_Call + IfStmt_Body + IfStmt_Cond + IfStmt_Else + IfStmt_Init + ImportSpec_Comment + ImportSpec_Doc + ImportSpec_Name + ImportSpec_Path + IncDecStmt_X + IndexExpr_Index + IndexExpr_X + IndexListExpr_Indices + IndexListExpr_X + InterfaceType_Methods + KeyValueExpr_Key + KeyValueExpr_Value + LabeledStmt_Label + LabeledStmt_Stmt + MapType_Key + MapType_Value + ParenExpr_X + RangeStmt_Body + RangeStmt_Key + RangeStmt_Value + RangeStmt_X + ReturnStmt_Results + SelectStmt_Body + SelectorExpr_Sel + SelectorExpr_X + SendStmt_Chan + SendStmt_Value + SliceExpr_High + SliceExpr_Low + SliceExpr_Max + SliceExpr_X + StarExpr_X + StructType_Fields + SwitchStmt_Body + SwitchStmt_Init + SwitchStmt_Tag + TypeAssertExpr_Type + TypeAssertExpr_X + TypeSpec_Comment + TypeSpec_Doc + TypeSpec_Name + TypeSpec_Type + TypeSpec_TypeParams + TypeSwitchStmt_Assign + TypeSwitchStmt_Body + TypeSwitchStmt_Init + UnaryExpr_X + ValueSpec_Comment + ValueSpec_Doc + ValueSpec_Names + ValueSpec_Type + ValueSpec_Values + + maxKind +) + +// Assert that the encoding fits in 7 bits, +// as the inspector relies on this. +// (We are currently at 104.) 
+var _ = [1 << 7]struct{}{}[maxKind] + +type fieldInfo struct { + nodeType reflect.Type // pointer-to-struct type of ast.Node implementation + name string + index int + fieldType reflect.Type +} + +func info[N ast.Node](fieldName string) fieldInfo { + nodePtrType := reflect.TypeFor[N]() + f, ok := nodePtrType.Elem().FieldByName(fieldName) + if !ok { + panic(fieldName) + } + return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type} +} + +var fieldInfos = [...]fieldInfo{ + Invalid: {}, + ArrayType_Elt: info[*ast.ArrayType]("Elt"), + ArrayType_Len: info[*ast.ArrayType]("Len"), + AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"), + AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"), + BinaryExpr_X: info[*ast.BinaryExpr]("X"), + BinaryExpr_Y: info[*ast.BinaryExpr]("Y"), + BlockStmt_List: info[*ast.BlockStmt]("List"), + BranchStmt_Label: info[*ast.BranchStmt]("Label"), + CallExpr_Args: info[*ast.CallExpr]("Args"), + CallExpr_Fun: info[*ast.CallExpr]("Fun"), + CaseClause_Body: info[*ast.CaseClause]("Body"), + CaseClause_List: info[*ast.CaseClause]("List"), + ChanType_Value: info[*ast.ChanType]("Value"), + CommClause_Body: info[*ast.CommClause]("Body"), + CommClause_Comm: info[*ast.CommClause]("Comm"), + CommentGroup_List: info[*ast.CommentGroup]("List"), + CompositeLit_Elts: info[*ast.CompositeLit]("Elts"), + CompositeLit_Type: info[*ast.CompositeLit]("Type"), + DeclStmt_Decl: info[*ast.DeclStmt]("Decl"), + DeferStmt_Call: info[*ast.DeferStmt]("Call"), + Ellipsis_Elt: info[*ast.Ellipsis]("Elt"), + ExprStmt_X: info[*ast.ExprStmt]("X"), + FieldList_List: info[*ast.FieldList]("List"), + Field_Comment: info[*ast.Field]("Comment"), + Field_Doc: info[*ast.Field]("Doc"), + Field_Names: info[*ast.Field]("Names"), + Field_Tag: info[*ast.Field]("Tag"), + Field_Type: info[*ast.Field]("Type"), + File_Decls: info[*ast.File]("Decls"), + File_Doc: info[*ast.File]("Doc"), + File_Name: info[*ast.File]("Name"), + ForStmt_Body: info[*ast.ForStmt]("Body"), + ForStmt_Cond: info[*ast.ForStmt]("Cond"), + ForStmt_Init: info[*ast.ForStmt]("Init"), + ForStmt_Post: info[*ast.ForStmt]("Post"), + FuncDecl_Body: info[*ast.FuncDecl]("Body"), + FuncDecl_Doc: info[*ast.FuncDecl]("Doc"), + FuncDecl_Name: info[*ast.FuncDecl]("Name"), + FuncDecl_Recv: info[*ast.FuncDecl]("Recv"), + FuncDecl_Type: info[*ast.FuncDecl]("Type"), + FuncLit_Body: info[*ast.FuncLit]("Body"), + FuncLit_Type: info[*ast.FuncLit]("Type"), + FuncType_Params: info[*ast.FuncType]("Params"), + FuncType_Results: info[*ast.FuncType]("Results"), + FuncType_TypeParams: info[*ast.FuncType]("TypeParams"), + GenDecl_Doc: info[*ast.GenDecl]("Doc"), + GenDecl_Specs: info[*ast.GenDecl]("Specs"), + GoStmt_Call: info[*ast.GoStmt]("Call"), + IfStmt_Body: info[*ast.IfStmt]("Body"), + IfStmt_Cond: info[*ast.IfStmt]("Cond"), + IfStmt_Else: info[*ast.IfStmt]("Else"), + IfStmt_Init: info[*ast.IfStmt]("Init"), + ImportSpec_Comment: info[*ast.ImportSpec]("Comment"), + ImportSpec_Doc: info[*ast.ImportSpec]("Doc"), + ImportSpec_Name: info[*ast.ImportSpec]("Name"), + ImportSpec_Path: info[*ast.ImportSpec]("Path"), + IncDecStmt_X: info[*ast.IncDecStmt]("X"), + IndexExpr_Index: info[*ast.IndexExpr]("Index"), + IndexExpr_X: info[*ast.IndexExpr]("X"), + IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"), + IndexListExpr_X: info[*ast.IndexListExpr]("X"), + InterfaceType_Methods: info[*ast.InterfaceType]("Methods"), + KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"), + KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"), + LabeledStmt_Label: info[*ast.LabeledStmt]("Label"), + 
LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"), + MapType_Key: info[*ast.MapType]("Key"), + MapType_Value: info[*ast.MapType]("Value"), + ParenExpr_X: info[*ast.ParenExpr]("X"), + RangeStmt_Body: info[*ast.RangeStmt]("Body"), + RangeStmt_Key: info[*ast.RangeStmt]("Key"), + RangeStmt_Value: info[*ast.RangeStmt]("Value"), + RangeStmt_X: info[*ast.RangeStmt]("X"), + ReturnStmt_Results: info[*ast.ReturnStmt]("Results"), + SelectStmt_Body: info[*ast.SelectStmt]("Body"), + SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"), + SelectorExpr_X: info[*ast.SelectorExpr]("X"), + SendStmt_Chan: info[*ast.SendStmt]("Chan"), + SendStmt_Value: info[*ast.SendStmt]("Value"), + SliceExpr_High: info[*ast.SliceExpr]("High"), + SliceExpr_Low: info[*ast.SliceExpr]("Low"), + SliceExpr_Max: info[*ast.SliceExpr]("Max"), + SliceExpr_X: info[*ast.SliceExpr]("X"), + StarExpr_X: info[*ast.StarExpr]("X"), + StructType_Fields: info[*ast.StructType]("Fields"), + SwitchStmt_Body: info[*ast.SwitchStmt]("Body"), + SwitchStmt_Init: info[*ast.SwitchStmt]("Init"), + SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"), + TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"), + TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"), + TypeSpec_Comment: info[*ast.TypeSpec]("Comment"), + TypeSpec_Doc: info[*ast.TypeSpec]("Doc"), + TypeSpec_Name: info[*ast.TypeSpec]("Name"), + TypeSpec_Type: info[*ast.TypeSpec]("Type"), + TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"), + TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"), + TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"), + TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"), + UnaryExpr_X: info[*ast.UnaryExpr]("X"), + ValueSpec_Comment: info[*ast.ValueSpec]("Comment"), + ValueSpec_Doc: info[*ast.ValueSpec]("Doc"), + ValueSpec_Names: info[*ast.ValueSpec]("Names"), + ValueSpec_Type: info[*ast.ValueSpec]("Type"), + ValueSpec_Values: info[*ast.ValueSpec]("Values"), +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go index a02206e3..4cfa51b6 100644 --- a/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { } // Get can be used to get a label for the key from a label.Map. -func (k *Value) Get(lm label.Map) interface{} { +func (k *Value) Get(lm label.Map) any { if t := lm.Find(k); t.Valid() { return k.From(t) } @@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} { } // From can be used to get a value from a Label. -func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } +func (k *Value) From(t label.Label) any { return t.UnpackValue() } // Of creates a new Label with this key and the supplied value. -func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } +func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } // Tag represents a key for tagging labels that have no value. 
// These are used when the existence of the label is the entire information it diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 0f526e1f..92a39105 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "reflect" + "slices" "unsafe" ) @@ -32,7 +33,7 @@ type Key interface { type Label struct { key Key packed uint64 - untyped interface{} + untyped any } // Map is the interface to a collection of Labels indexed by key. @@ -76,13 +77,13 @@ type mapChain struct { // OfValue creates a new label from the key and value. // This method is for implementing new key types, label creation should // normally be done with the Of method of the key. -func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } // UnpackValue assumes the label was built using LabelOfValue and returns the value // that was passed to that constructor. // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. -func (t Label) UnpackValue() interface{} { return t.untyped } +func (t Label) UnpackValue() any { return t.untyped } // Of64 creates a new label from a key and a uint64. This is often // used for non uint64 values that can be packed into a uint64. @@ -154,10 +155,8 @@ func (f *filter) Valid(index int) bool { func (f *filter) Label(index int) Label { l := f.underlying.Label(index) - for _, f := range f.keys { - if l.Key() == f { - return Label{} - } + if slices.Contains(f.keys, l.Key()) { + return Label{} } return l } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2..734c4619 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -14,7 +14,7 @@ import ( "sync" ) -func errorf(format string, args ...interface{}) { +func errorf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only 
appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index f6437feb..5662a311 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,49 +2,183 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. +// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. +// This file also additionally implements FindExportData for gcexportdata.NewReader. package gcimporter import ( "bufio" + "bytes" + "errors" "fmt" + "go/build" "io" - "strconv" + "os" + "os/exec" + "path/filepath" "strings" + "sync" ) -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying cmd/compile created archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. +// This returns the length of the export data in bytes. +// +// This function is needed by [gcexportdata.Read], which must +// accept inputs produced by the last two releases of cmd/compile, +// plus tip. +func FindExportData(r *bufio.Reader) (size int64, err error) { + arsize, err := FindPackageDefinition(r) if err != nil { return } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) - } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") + size = int64(arsize) + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { return } - name = strings.TrimSpace(string(hdr[:16])) + size -= int64(len(objapi)) + for _, h := range headers { + size -= int64(len(h)) + } + + // Check for the binary export data section header "$$B\n". + // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading + line, err := r.ReadSlice('\n') + if err != nil { + return + } + hdr := string(line) + if hdr != "$$B\n" { + err = fmt.Errorf("unknown export data header: %q", hdr) + return + } + size -= int64(len(hdr)) + + // For files with a binary export data header "$$B\n", + // these are always terminated by an end-of-section marker "\n$$\n". + // So the last bytes must always be this constant. + // + // The end-of-section marker is not a part of the export data itself. + // Do not include these in size. + // + // It would be nice to have sanity check that the final bytes after + // the export data are indeed the end-of-section marker. The split + // of gcexportdata.NewReader and gcexportdata.Read make checking this + // ugly so gcimporter gives up enforcing this. The compiler and go/types + // importer do enforce this, which seems good enough. 
+ const endofsection = "\n$$\n" + size -= int64(len(endofsection)) + + if size < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) + return + } + return } -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// The size result is the length of the export data in bytes, or -1 if not known. -func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { +// ReadUnified reads the contents of the unified export data from a reader r +// that contains the contents of a GC-created archive file. +// +// On success, the reader will be positioned after the end-of-section marker "\n$$\n". +// +// Supported GC-created archive files have 4 layers of nesting: +// - An archive file containing a package definition file. +// - The package definition file contains headers followed by a data section. +// Headers are lines (≤ 4kb) that do not start with "$$". +// - The data section starts with "$$B\n" followed by export data followed +// by an end of section marker "\n$$\n". (The section start "$$\n" is no +// longer supported.) +// - The export data starts with a format byte ('u') followed by the in +// the given format. (See ReadExportDataHeader for older formats.) +// +// Putting this together, the bytes in a GC-created archive files are expected +// to look like the following. +// See cmd/internal/archive for more details on ar file headers. +// +// | \n | ar file signature +// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size. +// | go object <...>\n | objabi header +// | \n | other headers such as build id +// | $$B\n | binary format marker +// | u\n | unified export +// | $$\n | end-of-section marker +// | [optional padding] | padding byte (0x0A) if size is odd +// | [ar file header] | other ar files +// | [ar file data] | +func ReadUnified(r *bufio.Reader) (data []byte, err error) { + // We historically guaranteed headers at the default buffer size (4096) work. + // This ensures we can use ReadSlice throughout. + const minBufferSize = 4096 + r = bufio.NewReaderSize(r, minBufferSize) + + size, err := FindPackageDefinition(r) + if err != nil { + return + } + n := size + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + n -= len(objapi) + for _, h := range headers { + n -= len(h) + } + + hdrlen, err := ReadExportDataHeader(r) + if err != nil { + return + } + n -= hdrlen + + // size also includes the end of section marker. Remove that many bytes from the end. + const marker = "\n$$\n" + n -= len(marker) + + if n < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n) + return + } + + // Read n bytes from buf. + data = make([]byte, n) + _, err = io.ReadFull(r, data) + if err != nil { + return + } + + // Check for marker at the end. 
+ var suffix [len(marker)]byte + _, err = io.ReadFull(r, suffix[:]) + if err != nil { + return + } + if s := string(suffix[:]); s != marker { + err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker) + return + } + + return +} + +// FindPackageDefinition positions the reader r at the beginning of a package +// definition file ("__.PKGDEF") within a GC-created archive by reading +// from it, and returns the size of the package definition file in the archive. +// +// The reader must be positioned at the start of the archive file before calling +// this function, and "__.PKGDEF" is assumed to be the first file in the archive. +// +// See cmd/internal/archive for details on the archive format. +func FindPackageDefinition(r *bufio.Reader) (size int, err error) { + // Uses ReadSlice to limit risk of malformed inputs. + // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -52,48 +186,236 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } - - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } - - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") + // Is the first line an archive file signature? + if string(line) != "!\n" { + err = fmt.Errorf("not the start of an archive file (%q)", line) return } - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - hdr = string(line) - if size < 0 { - size = -1 + // package export block should be first + size = readArchiveHeader(r, "__.PKGDEF") + if size <= 0 { + err = fmt.Errorf("not a package file") + return } return } + +// ReadObjectHeaders reads object headers from the reader. Object headers are +// lines that do not start with an end-of-section marker "$$". The first header +// is the objabi header. On success, the reader will be positioned at the beginning +// of the end-of-section marker. +// +// It returns an error if any header does not fit in r.Size() bytes. +func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) { + // line is a temporary buffer for headers. + // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs. + var line []byte + + // objapi header should be the first line + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + objapi = string(line) + + // objapi header begins with "go object ". 
+ if !strings.HasPrefix(objapi, "go object ") { + err = fmt.Errorf("not a go object file: %s", objapi) + return + } + + // process remaining object header lines + for { + // check for an end of section marker "$$" + line, err = r.Peek(2) + if err != nil { + return + } + if string(line) == "$$" { + return // stop + } + + // read next header + line, err = r.ReadSlice('\n') + if err != nil { + return + } + headers = append(headers, string(line)) + } +} + +// ReadExportDataHeader reads the export data header and format from r. +// It returns the number of bytes read, or an error if the format is no longer +// supported or it failed to read. +// +// The only currently supported format is binary export data in the +// unified export format. +func ReadExportDataHeader(r *bufio.Reader) (n int, err error) { + // Read export data header. + line, err := r.ReadSlice('\n') + if err != nil { + return + } + + hdr := string(line) + switch hdr { + case "$$\n": + err = fmt.Errorf("old textual export format no longer supported (recompile package)") + return + + case "$$B\n": + var format byte + format, err = r.ReadByte() + if err != nil { + return + } + // The unified export format starts with a 'u'. + switch format { + case 'u': + default: + // Older no longer supported export formats include: + // indexed export format which started with an 'i'; and + // the older binary export format which started with a 'c', + // 'd', or 'v' (from "version"). + err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format) + return + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + return + } + + n = len(hdr) + 1 // + 1 is for 'u' + return +} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// +// FindPkg is only used in tests within x/tools. +func FindPkg(path, srcDir string) (filename, id string, err error) { + // TODO(taking): Move internal/exportdata.FindPkg into its own file, + // and then this copy into a _test package. + if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && !f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +var exportMap sync.Map // package dir → func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +// +// lookupGorootExport is only used in tests within x/tools. 
+func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df9112..3dbd21d1 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" - "bytes" "fmt" - "go/build" "go/token" "go/types" "io" "os" - "os/exec" - "path/filepath" - "strings" - "sync" ) const ( @@ -45,125 +39,14 @@ const ( trace = false ) -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +// +// Import is only used in tests. +func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser - var filename, id string + var id string if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -182,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } rc = f } else { - filename, id = FindPkg(path, srcDir) + var filename string + filename, id, err = FindPkg(path, srcDir) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } - return nil, fmt.Errorf("can't find import: %q", id) + return nil, err } // no need to re-import if the package was imported completely before @@ -210,57 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var hdr string - var size int64 buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { + data, err := ReadUnified(buf) + if err != nil { + err = fmt.Errorf("import %q: %v", path, err) return } - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. 
- if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': // indexed, till go1.19 - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': // unified, from go1.20 - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } + // unified: emitted by cmd/compile since go1.20. + _, pkg, err = UImportData(fset, packages, data, id) return } - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index deeb67f3..780873e3 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,9 +2,227 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. 
+// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. +// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. 
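
As a rough illustration of the string encoding described in the format comment above (a stringOff points into the Strings section, where a uvarint length precedes the string bytes), a minimal standalone decoder might look like the following sketch. The readString helper and its error handling are hypothetical and are not part of the vendored package; the real logic lives in the stringAt method used by iimport.go.

package main

import (
	"encoding/binary"
	"fmt"
)

// readString decodes the string stored at offset off within the Strings
// section: a uvarint length followed by that many bytes, per the indexed
// export format described above. (Illustrative sketch only.)
func readString(data []byte, off uint64) (string, error) {
	n, k := binary.Uvarint(data[off:])
	if k <= 0 {
		return "", fmt.Errorf("invalid uvarint at offset %d", off)
	}
	start := off + uint64(k)
	if start+n > uint64(len(data)) {
		return "", fmt.Errorf("string at offset %d overruns the Strings section", off)
	}
	return string(data[start : start+n]), nil
}

func main() {
	// Build a tiny Strings section holding "hello" at offset 0.
	section := append(binary.AppendUvarint(nil, 5), []byte("hello")...)
	s, err := readString(section, 0)
	fmt.Println(s, err) // hello <nil>
}
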
package gcimporter @@ -18,26 +236,46 @@ import ( "io" "math/big" "reflect" + "slices" "sort" "strconv" "strings" "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. // +// For types, we use "shallow" export data. Historically, the Go +// compiler always produced a summary of the types for a given package +// that included types from other packages that it indirectly +// referenced: "deep" export data. This had the advantage that the +// compiler (and analogous tools such as gopls) need only load one +// file per direct import. However, it meant that the files tended to +// get larger based on the level of the package in the import +// graph. For example, higher-level packages in the kubernetes module +// have over 1MB of "deep" export data, even when they have almost no +// content of their own, merely because they mention a major type that +// references many others. In pathological cases the export data was +// 300x larger than the source for a package due to this quadratic +// growth. +// +// "Shallow" export data means that the serialized types describe only +// a single package. If those types mention types from other packages, +// the type checker may need to request additional packages beyond +// just the direct imports. Type information for the entire transitive +// closure of imports is provided (lazily) by the DAG. +// // No promises are made about the encoding other than that it can be decoded by // the same version of IIExportShallow. If you plan to save export data in the // file system, be sure to include a cryptographic digest of the executable in // the key to avoid version skew. // -// If the provided reportf func is non-nil, it will be used for reporting bugs -// encountered during export. -// TODO(rfindley): remove reportf when we are confident enough in the new -// objectpath encoding. +// If the provided reportf func is non-nil, it is used for reporting +// bugs (e.g. recovered panics) encountered during export, enabling us +// to obtain via telemetry the stack that would otherwise be lost by +// merely returning an error. func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { // In principle this operation can only fail if out.Write fails, // but that's impossible for bytes.Buffer---and as a matter of @@ -46,13 +284,13 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) // TODO(adonovan): use byte slices throughout, avoiding copying. const bundle, shallow = false, true var out bytes.Buffer - err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, reportf) return out.Bytes(), err } // IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. +// [IExportShallow] in the same executable. This function cannot import data +// from cmd/compile or gcexportdata.Write. // // The importer calls getPackages to obtain package symbols for all // packages mentioned in the export data, including the one being @@ -73,7 +311,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt } // ReportFunc is the type of a function used to report formatted bugs. 
-type ReportFunc = func(string, ...interface{}) +type ReportFunc = func(string, ...any) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -86,20 +324,27 @@ const bundleVersion = 0 // so that calls to IImportData can override with a provided package path. func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { const bundle, shallow = false, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, nil) } // IExportBundle writes an indexed export bundle for pkgs to out. func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { const bundle, shallow = true, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs, nil) } -func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package, reportf ReportFunc) (err error) { if !debug { defer func() { if e := recover(); e != nil { + // Report the stack via telemetry (see #71067). + if reportf != nil { + reportf("panic in exporter") + } if ierr, ok := e.(internalError); ok { + // internalError usually means we exported a + // bad go/types data structure: a violation + // of an implicit precondition of Export. err = ierr return } @@ -221,9 +466,9 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) w.uint64(size) // Sort the set of needed offsets. Duplicates are harmless. - sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) + slices.Sort(needed) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -360,7 +605,7 @@ type filePositions struct { needed []uint64 // unordered list of needed file offsets } -func (p *iexporter) trace(format string, args ...interface{}) { +func (p *iexporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. 
@@ -507,13 +752,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -523,9 +768,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -562,7 +820,7 @@ func (p *iexporter) doDecl(obj types.Object) { n := named.NumMethods() w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { m := named.Method(i) w.pos(m.Pos()) w.string(m.Name()) @@ -744,8 +1002,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -833,7 +1097,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.pkg(fieldPkg) w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { f := t.Field(i) if w.p.shallow { w.objectPath(f) @@ -854,7 +1118,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) @@ -882,7 +1146,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) - for i := 0; i < nt; i++ { + for i := range nt { term := t.Term(i) w.bool(term.Tilde()) w.typ(term.Type(), pkg) @@ -1011,7 +1275,7 @@ func tparamName(exportName string) string { func (w *exportWriter) paramList(tup *types.Tuple) { n := tup.Len() w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { w.param(tup.At(i)) } } @@ -1327,6 +1591,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } // "internalErrorf" as the former is used for bugs, whose cause is // internal inconsistency, whereas the latter is used for ordinary // situations like bad input, whose cause is external. 
-func internalErrorf(format string, args ...interface{}) error { +func internalErrorf(format string, args ...any) error { return internalError(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa036..82e6c9d2 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,9 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. +// See iexport.go for the export data format. package gcimporter @@ -18,6 +16,7 @@ import ( "go/types" "io" "math/big" + "slices" "sort" "strings" @@ -53,6 +52,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -315,7 +315,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte pkgs = pkgList[:1] // record all referenced packages as imports - list := append(([]*types.Package)(nil), pkgList[1:]...) + list := slices.Clone(pkgList[1:]) sort.Sort(byPath(list)) pkgs[0].SetImports(list) } @@ -401,7 +401,7 @@ type iimporter struct { indent int // for tracing support } -func (p *iimporter) trace(format string, args ...interface{}) { +func (p *iimporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. @@ -540,7 +540,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -557,19 +557,28 @@ type importReader struct { prevColumn int64 } +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. +// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 + r.declare(obj) case constTag: typ, val := r.value() @@ -589,6 +598,9 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). 
r.declare(obj) @@ -615,7 +627,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +657,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -660,7 +672,9 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - r.declare(types.NewVar(pos, r.currPkg, name, typ)) + v := types.NewVar(pos, r.currPkg, name, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + r.declare(v) default: errorf("unexpected tag: %v", tag) @@ -852,7 +866,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -862,7 +876,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -959,7 +973,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1065,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -1098,3 +1112,9 @@ func (r *importReader) byte() byte { } return x } + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 00000000..7586bfac --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. + +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. 
+ const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 00000000..907c8557 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. 
+var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go new file mode 100644 index 00000000..4af810dc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "bufio" + "io" + "strconv" + "strings" +) + +// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. +func readArchiveHeader(b *bufio.Reader, name string) int { + // architecture-independent object file output + const HeaderSize = 60 + + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. 
-func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624ca..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c077068..37b4a39e 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,10 +11,10 @@ import ( "go/token" "go/types" "sort" - "strings" "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" + "golang.org/x/tools/internal/typesinternal" ) // A pkgReader holds the shared state for reading a unified IR package @@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo. @@ -72,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -110,13 +108,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +167,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. 
tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -263,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { - case "": + // cmd/compile emits path="main" for main packages because + // that's the linker symbol prefix it used; but we need + // the package's path as it would be reported by go list, + // hence "main" below. + // See test at go/packages.TestMainPackagePathInModeTypes. + case "", "main": path = r.p.PkgPath() case "builtin": return nil // universe @@ -471,7 +478,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +534,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -553,14 +566,15 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). - if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) sig := fn.Type().(*types.Signature) recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) - methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + typesinternal.SetVarKind(recv, typesinternal.RecvVar) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic())) } embeds := make([]types.Type, iface.NumEmbeddeds()) @@ -607,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjVar: pos := r.pos() typ := r.typ() - declare(types.NewVar(pos, objPkg, objName, typ)) + v := types.NewVar(pos, objPkg, objName, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + declare(v) } } @@ -632,7 +648,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } } pr.retireReader(r) @@ -726,3 +745,17 @@ func pkgScope(pkg *types.Package) *types.Scope { } return types.Universe } + +// See cmd/compile/internal/types.SplitVargenSuffix. 
+func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index af0ee6c6..58721202 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -16,7 +16,6 @@ import ( "os" "os/exec" "path/filepath" - "reflect" "regexp" "runtime" "strconv" @@ -29,7 +28,7 @@ import ( "golang.org/x/tools/internal/event/label" ) -// An Runner will run go command invocations and serialize +// A Runner will run go command invocations and serialize // them if it sees a concurrency error. type Runner struct { // once guards the runner initialization. @@ -142,7 +141,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde // Wait for all in-progress go commands to return before proceeding, // to avoid load concurrency errors. - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { select { case <-ctx.Done(): return ctx.Err(), ctx.Err() @@ -180,7 +179,7 @@ type Invocation struct { CleanEnv bool Env []string WorkingDir string - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) } // Postcondition: both error results have same nilness. @@ -200,12 +199,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io return } -func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { - log := i.Logf - if log == nil { - log = func(string, ...interface{}) {} +// logf logs if i.Logf is non-nil. +func (i *Invocation) logf(format string, args ...any) { + if i.Logf != nil { + i.Logf(format, args...) } +} +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { goArgs := []string{i.Verb} appendModFile := func() { @@ -248,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Stdout = stdout cmd.Stderr = stderr - // cmd.WaitDelay was added only in go1.20 (see #50436). - if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime. - waitDelay.Set(reflect.ValueOf(30 * time.Second)) - } + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second // The cwd gets resolved to the real path. 
On Darwin, where // /tmp is a symlink, this breaks anything that expects the @@ -277,7 +275,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Dir = i.WorkingDir } - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + debugStr := cmdDebugStr(cmd) + i.logf("starting %v", debugStr) + start := time.Now() + defer func() { + i.logf("%s for %v", time.Since(start), debugStr) + }() return runCmdContext(ctx, cmd) } @@ -385,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { case err := <-resChan: return err case <-timer.C: - HandleHangingGoCommand(startTime, cmd) + // HandleHangingGoCommand terminates this process. + // Pass off resChan in case we can collect the command error. + handleHangingGoCommand(startTime, cmd, resChan) case <-ctx.Done(): } } else { @@ -410,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } // Didn't shut down in response to interrupt. Kill it hard. - // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT - // on certain platforms, such as unix. if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } @@ -419,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { return <-resChan } -func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) { +// handleHangingGoCommand outputs debugging information to help diagnose the +// cause of a hanging Go command, and then exits with log.Fatalf. +func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) { switch runtime.GOOS { - case "linux", "darwin", "freebsd", "netbsd": + case "linux", "darwin", "freebsd", "netbsd", "openbsd": fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND -The gopls test runner has detected a hanging go command. In order to debug -this, the output of ps and lsof/fstat is printed below. + The gopls test runner has detected a hanging go command. In order to debug + this, the output of ps and lsof/fstat is printed below. -See golang/go#54461 for more details.`) + See golang/go#54461 for more details.`) fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") fmt.Fprintln(os.Stderr, "-------------------------") @@ -435,7 +440,7 @@ See golang/go#54461 for more details.`) psCmd.Stdout = os.Stderr psCmd.Stderr = os.Stderr if err := psCmd.Run(); err != nil { - panic(fmt.Sprintf("running ps: %v", err)) + log.Printf("Handling hanging Go command: running ps: %v", err) } listFiles := "lsof" @@ -449,10 +454,24 @@ See golang/go#54461 for more details.`) listFilesCmd.Stdout = os.Stderr listFilesCmd.Stderr = os.Stderr if err := listFilesCmd.Run(); err != nil { - panic(fmt.Sprintf("running %s: %v", listFiles, err)) + log.Printf("Handling hanging Go command: running %s: %v", listFiles, err) + } + // Try to extract information about the slow go process by issuing a SIGQUIT. 
+ if err := cmd.Process.Signal(sigStuckProcess); err == nil { + select { + case err := <-resChan: + stderr := "not a bytes.Buffer" + if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil { + stderr = buf.String() + } + log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr) + case <-time.After(5 * time.Second): + } + } else { + log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err) } } - panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)) + log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid) } func cmdDebugStr(cmd *exec.Cmd) string { @@ -514,7 +533,7 @@ func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), for k, v := range overlay { // Use a unique basename for each file (001-foo.go), // to avoid creating nested directories. - base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k)) + base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k)) filename := filepath.Join(dir, base) err := os.WriteFile(filename, v, 0666) if err != nil { diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go new file mode 100644 index 00000000..469c648e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package gocommand + +import "os" + +// sigStuckProcess is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var sigStuckProcess = os.Kill diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go new file mode 100644 index 00000000..169d37c8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package gocommand + +import "syscall" + +// Sigstuckprocess is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var sigStuckProcess = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 83615155..5252144d 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -14,6 +14,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "strings" "sync" "time" @@ -22,7 +23,7 @@ import ( // Options controls the behavior of a Walk call. type Options struct { // If Logf is non-nil, debug logging is enabled through this function. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Search module caches. Also disables legacy goimports ignore rules. ModulesEnabled bool @@ -81,7 +82,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. 
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if opts.Logf == nil { - opts.Logf = func(format string, args ...interface{}) {} + opts.Logf = func(format string, args ...any) {} } if _, err := os.Stat(root.Path); os.IsNotExist(err) { opts.Logf("skipping nonexistent directory: %v", root.Path) @@ -195,10 +196,8 @@ func (w *walker) getIgnoredDirs(path string) []string { // shouldSkipDir reports whether the file should be skipped or not. func (w *walker) shouldSkipDir(dir string) bool { - for _, ignoredDir := range w.ignoredDirs { - if dir == ignoredDir { - return true - } + if slices.Contains(w.ignoredDirs, dir) { + return true } if w.skip != nil { // Check with the user specified callback. diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 4569313a..89b96381 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -32,6 +32,7 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" "golang.org/x/tools/internal/stdlib" + "maps" ) // importToGroup is a list of functions which map from an import path to @@ -90,18 +91,6 @@ type ImportFix struct { Relevance float64 // see pkg } -// An ImportInfo represents a single import statement. -type ImportInfo struct { - ImportPath string // import path, e.g. "crypto/rand". - Name string // import name, e.g. "crand", or "" if none. -} - -// A packageInfo represents what's known about a package. -type packageInfo struct { - name string // real package name, if known. - exports map[string]bool // known exports. -} - // parseOtherFiles parses all the Go files in srcDir except filename, including // test files if filename looks like a test. // @@ -130,7 +119,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename continue } - f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution) if err != nil { continue } @@ -161,8 +150,8 @@ func addGlobals(f *ast.File, globals map[string]bool) { // collectReferences builds a map of selector expressions, from // left hand side (X) to a set of right hand sides (Sel). -func collectReferences(f *ast.File) references { - refs := references{} +func collectReferences(f *ast.File) References { + refs := References{} var visitor visitFn visitor = func(node ast.Node) ast.Visitor { @@ -232,7 +221,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { allFound := true for right := range syms { - if !pkgInfo.exports[right] { + if !pkgInfo.Exports[right] { allFound = false break } @@ -245,11 +234,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { return nil } -// references is set of references found in a Go file. The first map key is the -// left hand side of a selector expression, the second key is the right hand -// side, and the value should always be true. -type references map[string]map[string]bool - // A pass contains all the inputs and state necessary to fix a file's imports. // It can be modified in some ways during use; see comments below. type pass struct { @@ -257,27 +241,29 @@ type pass struct { fset *token.FileSet // fset used to parse f and its siblings. f *ast.File // the file being fixed. srcDir string // the directory containing f. 
- env *ProcessEnv // the environment to use for go commands, etc. - loadRealPackageNames bool // if true, load package names from disk rather than guessing them. - otherFiles []*ast.File // sibling files. + logf func(string, ...any) + source Source // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + goroot string // Intermediate state, generated by load. existingImports map[string][]*ImportInfo - allRefs references - missingRefs references + allRefs References + missingRefs References // Inputs to fix. These can be augmented between successive fix calls. lastTry bool // indicates that this is the last call and fix should clean up as best it can. candidates []*ImportInfo // candidate imports in priority order. - knownPackages map[string]*packageInfo // information about all known packages. + knownPackages map[string]*PackageInfo // information about all known packages. } // loadPackageNames saves the package names for everything referenced by imports. -func (p *pass) loadPackageNames(imports []*ImportInfo) error { - if p.env.Logf != nil { - p.env.Logf("loading package names for %v packages", len(imports)) +func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error { + if p.logf != nil { + p.logf("loading package names for %v packages", len(imports)) defer func() { - p.env.Logf("done loading package names for %v packages", len(imports)) + p.logf("done loading package names for %v packages", len(imports)) }() } var unknown []string @@ -288,20 +274,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { unknown = append(unknown, imp.ImportPath) } - resolver, err := p.env.GetResolver() - if err != nil { - return err - } - - names, err := resolver.loadPackageNames(unknown, p.srcDir) + names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown) if err != nil { return err } + // TODO(rfindley): revisit this. Why do we need to store known packages with + // no exports? The inconsistent data is confusing. for path, name := range names { - p.knownPackages[path] = &packageInfo{ - name: name, - exports: map[string]bool{}, + p.knownPackages[path] = &PackageInfo{ + Name: name, + Exports: map[string]bool{}, } } return nil @@ -329,8 +312,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { return imp.Name } known := p.knownPackages[imp.ImportPath] - if known != nil && known.name != "" { - return withoutVersion(known.name) + if known != nil && known.Name != "" { + return withoutVersion(known.Name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -338,9 +321,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { // load reads in everything necessary to run a pass, and reports whether the // file already has all the imports it needs. It fills in p.missingRefs with the // file's missing symbols, if any, or removes unused imports if not. -func (p *pass) load() ([]*ImportFix, bool) { - p.knownPackages = map[string]*packageInfo{} - p.missingRefs = references{} +func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) { + p.knownPackages = map[string]*PackageInfo{} + p.missingRefs = References{} p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. @@ -363,10 +346,10 @@ func (p *pass) load() ([]*ImportFix, bool) { // f's imports by the identifier they introduce. 
imports := collectImports(p.f) if p.loadRealPackageNames { - err := p.loadPackageNames(append(imports, p.candidates...)) + err := p.loadPackageNames(ctx, append(imports, p.candidates...)) if err != nil { - if p.env.Logf != nil { - p.env.Logf("loading package names: %v", err) + if p.logf != nil { + p.logf("loading package names: %v", err) } return nil, false } @@ -536,9 +519,10 @@ func (p *pass) assumeSiblingImportsValid() { // We have the stdlib in memory; no need to guess. rights = symbolNameSet(m) } - p.addCandidate(imp, &packageInfo{ + // TODO(rfindley): we should set package name here, for consistency. + p.addCandidate(imp, &PackageInfo{ // no name; we already know it. - exports: rights, + Exports: rights, }) } } @@ -547,14 +531,14 @@ func (p *pass) assumeSiblingImportsValid() { // addCandidate adds a candidate import to p, and merges in the information // in pkg. -func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { +func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) { p.candidates = append(p.candidates, imp) if existing, ok := p.knownPackages[imp.ImportPath]; ok { - if existing.name == "" { - existing.name = pkg.name + if existing.Name == "" { + existing.Name = pkg.Name } - for export := range pkg.exports { - existing.exports[export] = true + for export := range pkg.Exports { + existing.Exports[export] = true } } else { p.knownPackages[imp.ImportPath] = pkg @@ -563,33 +547,61 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { // fixImports adds and removes imports from f so that all its references are // satisfied and there are no unused imports. -func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { +// +// This is declared as a variable rather than a function so goimports can +// easily be extended by adding a file with an init function. +// +// DO NOT REMOVE: used internally at Google. +var fixImports = fixImportsDefault + +func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { fixes, err := getFixes(context.Background(), fset, f, filename, env) if err != nil { return err } apply(fset, f, fixes) - return err + return nil } // getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + source, err := NewProcessEnvSource(env, filename, f.Name.Name) + if err != nil { + return nil, err + } + goEnv, err := env.goEnv() + if err != nil { + return nil, err + } + return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source) +} + +func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) { + // This logic is defensively duplicated from getFixes. abs, err := filepath.Abs(filename) if err != nil { return nil, err } srcDir := filepath.Dir(abs) - if env.Logf != nil { - env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + + if logf != nil { + logf("fixImports(filename=%q), srcDir=%q ...", filename, srcDir) } // First pass: looking only at f, and using the naive algorithm to // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. 
- p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} - if fixes, done := p.load(); done { + p := &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: source, + } + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -601,7 +613,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st // Second pass: add information from other files in the same package, // like their package vars and imports. p.otherFiles = otherFiles - if fixes, done := p.load(); done { + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -614,10 +626,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st // Third pass: get real package names where we had previously used // the naive algorithm. - p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p = &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: p.source, // safe to reuse, as it's just a wrapper around env + } p.loadRealPackageNames = true p.otherFiles = otherFiles - if fixes, done := p.load(); done { + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -762,7 +781,7 @@ func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix return true }, dirFound: func(pkg *pkg) bool { - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } // Try the assumed package name first, then a simpler path match @@ -797,7 +816,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, return true }, dirFound: func(pkg *pkg) bool { - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } return strings.HasPrefix(pkg.importPathShort, searchPrefix) @@ -831,7 +850,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return true }, dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + return pkgIsCandidate(filename, References{searchPkg: nil}, pkg) }, packageNameLoaded: func(pkg *pkg) bool { return pkg.packageName == searchPkg @@ -909,7 +928,7 @@ type ProcessEnv struct { WorkingDir string // If Logf is non-nil, debug logging is enabled through this function. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // If set, ModCache holds a shared cache of directory info to use across // multiple ProcessEnvs. @@ -950,9 +969,7 @@ func (e *ProcessEnv) CopyConfig() *ProcessEnv { resolver: nil, Env: map[string]string{}, } - for k, v := range e.Env { - copy.Env[k] = v - } + maps.Copy(copy.Env, e.Env) return copy } @@ -985,9 +1002,7 @@ func (e *ProcessEnv) init() error { if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { return err } - for k, v := range goEnv { - e.Env[k] = v - } + maps.Copy(e.Env, goEnv) e.initialized = true return nil } @@ -1012,18 +1027,28 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { // // For gopls, we can optionally explicitly choose a resolver type, since we // already know the view type. 
- if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + if e.Env["GOMOD"] == "" && (e.Env["GOWORK"] == "" || e.Env["GOWORK"] == "off") { e.resolver = newGopathResolver(e) + e.logf("created gopath resolver") } else if r, err := newModuleResolver(e, e.ModCache); err != nil { e.resolverErr = err + e.logf("failed to create module resolver: %v", err) } else { e.resolver = Resolver(r) + e.logf("created module resolver") } } return e.resolver, e.resolverErr } +// logf logs if e.Logf is non-nil. +func (e *ProcessEnv) logf(format string, args ...any) { + if e.Logf != nil { + e.Logf(format, args...) + } +} + // buildContext returns the build.Context to use for matching files. // // TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform @@ -1072,11 +1097,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) return e.GocmdRunner.Run(ctx, inv) } -func addStdlibCandidates(pass *pass, refs references) error { - goenv, err := pass.env.goEnv() - if err != nil { - return err - } +func addStdlibCandidates(pass *pass, refs References) error { localbase := func(nm string) string { ans := path.Base(nm) if ans[0] == 'v' { @@ -1091,13 +1112,13 @@ func addStdlibCandidates(pass *pass, refs references) error { } add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir { return } exports := symbolNameSet(stdlib.PackageSymbols[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, - &packageInfo{name: localbase(pkg), exports: exports}) + &PackageInfo{Name: localbase(pkg), Exports: exports}) } for left := range refs { if left == "rand" { @@ -1108,6 +1129,9 @@ func addStdlibCandidates(pass *pass, refs references) error { // but we have no way of figuring out what the user is using // TODO: investigate using the toolchain version to disambiguate in the stdlib add("math/rand/v2") + // math/rand has an overlapping API + // TestIssue66407 fails without this + add("math/rand") continue } for importPath := range stdlib.PackageSymbols { @@ -1127,8 +1151,8 @@ type Resolver interface { // scan works with callback to search for packages. See scanCallback for details. scan(ctx context.Context, callback *scanCallback) error - // loadExports returns the set of exported symbols in the package at dir. - // loadExports may be called concurrently. + // loadExports returns the package name and set of exported symbols in the + // package at dir. loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) // scoreImportPath returns the relevance for an import path. @@ -1161,101 +1185,22 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) } -func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error { ctx, done := event.Start(ctx, "imports.addExternalCandidates") defer done() - var mu sync.Mutex - found := make(map[string][]pkgDistance) - callback := &scanCallback{ - rootFound: func(gopathwalk.Root) bool { - return true // We want everything. 
- }, - dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, refs, pkg) - }, - packageNameLoaded: func(pkg *pkg) bool { - if _, want := refs[pkg.packageName]; !want { - return false - } - if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - return false - } - if !canUse(filename, pkg.dir) { - return false - } - mu.Lock() - defer mu.Unlock() - found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) - return false // We'll do our own loading after we sort. - }, - } - resolver, err := pass.env.GetResolver() + results, err := pass.source.ResolveReferences(ctx, filename, refs) if err != nil { return err } - if err = resolver.scan(ctx, callback); err != nil { - return err - } - // Search for imports matching potential package references. - type result struct { - imp *ImportInfo - pkg *packageInfo - } - results := make(chan result, len(refs)) - - ctx, cancel := context.WithCancel(ctx) - var wg sync.WaitGroup - defer func() { - cancel() - wg.Wait() - }() - var ( - firstErr error - firstErrOnce sync.Once - ) - for pkgName, symbols := range refs { - wg.Add(1) - go func(pkgName string, symbols map[string]bool) { - defer wg.Done() - - found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) - - if err != nil { - firstErrOnce.Do(func() { - firstErr = err - cancel() - }) - return - } - - if found == nil { - return // No matching package. - } - - imp := &ImportInfo{ - ImportPath: found.importPathShort, - } - - pkg := &packageInfo{ - name: pkgName, - exports: symbols, - } - results <- result{imp, pkg} - }(pkgName, symbols) - } - go func() { - wg.Wait() - close(results) - }() - - for result := range results { + for _, result := range results { + if result == nil { + continue + } // Don't offer completions that would shadow predeclared // names, such as github.com/coreos/etcd/error. - if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + if types.Universe.Lookup(result.Package.Name) != nil { // predeclared // Ideally we would skip this candidate only // if the predeclared name is actually // referenced by the file, but that's a lot @@ -1264,9 +1209,9 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil // user before long. continue } - pass.addCandidate(result.imp, result.pkg) + pass.addCandidate(result.Import, result.Package) } - return firstErr + return nil } // notIdentifier reports whether ch is an invalid identifier character. @@ -1608,11 +1553,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } fullFile := filepath.Join(dir, fi.Name()) + // Legacy ast.Object resolution is needed here. f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { - if env.Logf != nil { - env.Logf("error parsing %v: %v", fullFile, err) - } + env.logf("error parsing %v: %v", fullFile, err) continue } if f.Name.Name == "documentation" { @@ -1648,9 +1592,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } sortSymbols(exports) - if env.Logf != nil { - env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) - } + env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) return pkgName, exports, nil } @@ -1660,25 +1602,39 @@ func sortSymbols(syms []stdlib.Symbol) { }) } -// findImport searches for a package with the given symbols. 
-// If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { +// A symbolSearcher searches for a package with a set of symbols, among a set +// of candidates. See [symbolSearcher.search]. +// +// The search occurs within the scope of a single file, with context captured +// in srcDir and xtest. +type symbolSearcher struct { + logf func(string, ...any) + srcDir string // directory containing the file + xtest bool // if set, the file containing is an x_test file + loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) +} + +// search searches the provided candidates for a package containing all +// exported symbols. +// +// If successful, returns the resulting package. +func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so // there's no "penalty" for vendoring. sort.Sort(byDistanceOrImportPathShortLength(candidates)) - if pass.env.Logf != nil { + if s.logf != nil { for i, c := range candidates { - pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } - resolver, err := pass.env.GetResolver() - if err != nil { - return nil, err - } - // Collect exports for packages with matching names. + // Arrange rescv so that we can we can await results in order of relevance + // and exit as soon as we find the first match. + // + // Search with bounded concurrency, returning as soon as the first result + // among rescv is non-nil. rescv := make([]chan *pkg, len(candidates)) for i := range candidates { rescv[i] = make(chan *pkg, 1) @@ -1686,6 +1642,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa const maxConcurrentPackageImport = 4 loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + // Ensure that all work is completed at exit. ctx, cancel := context.WithCancel(ctx) var wg sync.WaitGroup defer func() { @@ -1693,6 +1650,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa wg.Wait() }() + // Start the search. wg.Add(1) go func() { defer wg.Done() @@ -1703,55 +1661,67 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa return } + i := i + c := c wg.Add(1) - go func(c pkgDistance, resc chan<- *pkg) { + go func() { defer func() { <-loadExportsSem wg.Done() }() - - if pass.env.Logf != nil { - pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + if s.logf != nil { + s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } - // If we're an x_test, load the package under test's test variant. 
- includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir - _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) + pkg, err := s.searchOne(ctx, c, symbols) if err != nil { - if pass.env.Logf != nil { - pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) + if s.logf != nil && ctx.Err() == nil { + s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) } - resc <- nil - return + pkg = nil } - - exportsMap := make(map[string]bool, len(exports)) - for _, sym := range exports { - exportsMap[sym.Name] = true - } - - // If it doesn't have the right - // symbols, send nil to mean no match. - for symbol := range symbols { - if !exportsMap[symbol] { - resc <- nil - return - } - } - resc <- c.pkg - }(c, rescv[i]) + rescv[i] <- pkg // may be nil + }() } }() + // Await the first (best) result. for _, resc := range rescv { - pkg := <-resc - if pkg == nil { - continue + select { + case r := <-resc: + if r != nil { + return r, nil + } + case <-ctx.Done(): + return nil, ctx.Err() } - return pkg, nil } return nil, nil } +func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + // If we're considering the package under test from an x_test, load the + // test variant. + includeTest := s.xtest && c.pkg.dir == s.srcDir + _, exports, err := s.loadExports(ctx, c.pkg, includeTest) + if err != nil { + return nil, err + } + + exportsMap := make(map[string]bool, len(exports)) + for _, sym := range exports { + exportsMap[sym.Name] = true + } + for symbol := range symbols { + if !exportsMap[symbol] { + return nil, nil // no match + } + } + return c.pkg, nil +} + // pkgIsCandidate reports whether pkg is a candidate for satisfying the // finding which package pkgIdent in the file named by filename is trying // to refer to. @@ -1764,68 +1734,34 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa // filename is the file being formatted. // pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { // Check "internal" and "vendor" visibility: - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } // Speed optimization to minimize disk I/O: - // the last two components on disk must contain the - // package name somewhere. // - // This permits mismatch naming like directory - // "go-foo" being package "foo", or "pkg.v3" being "pkg", - // or directory "google.golang.org/api/cloudbilling/v1" - // being package "cloudbilling", but doesn't - // permit a directory "foo" to be package - // "bar", which is strongly discouraged - // anyway. There's no reason goimports needs - // to be slow just to accommodate that. + // Use the matchesPath heuristic to filter to package paths that could + // reasonably match a dangling reference. + // + // This permits mismatch naming like directory "go-foo" being package "foo", + // or "pkg.v3" being "pkg", or directory + // "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but + // doesn't permit a directory "foo" to be package "bar", which is strongly + // discouraged anyway. There's no reason goimports needs to be slow just to + // accommodate that. 
for pkgIdent := range refs { - lastTwo := lastTwoComponents(pkg.importPathShort) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - } - } - return false -} - -func hasHyphenOrUpperASCII(s string) bool { - for i := 0; i < len(s); i++ { - b := s[i] - if b == '-' || ('A' <= b && b <= 'Z') { + if matchesPath(pkgIdent, pkg.importPathShort) { return true } } return false } -func lowerASCIIAndRemoveHyphen(s string) (ret string) { - buf := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == '-': - continue - case 'A' <= b && b <= 'Z': - buf = append(buf, b+('a'-'A')) - default: - buf = append(buf, b) - } - } - return string(buf) -} - -// canUse reports whether the package in dir is usable from filename, +// CanUse reports whether the package in dir is usable from filename, // respecting the Go "internal" and "vendor" visibility rules. -func canUse(filename, dir string) bool { +func CanUse(filename, dir string) bool { // Fast path check, before any allocations. If it doesn't contain vendor // or internal, it's not tricky: // Note that this can false-negative on directories like "notinternal", @@ -1863,19 +1799,84 @@ func canUse(filename, dir string) bool { return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") } -// lastTwoComponents returns at most the last two path components -// of v, using either / or \ as the path separator. -func lastTwoComponents(v string) string { +// matchesPath reports whether ident may match a potential package name +// referred to by path, using heuristics to filter out unidiomatic package +// names. +// +// Specifically, it checks whether either of the last two '/'- or '\'-delimited +// path segments matches the identifier. The segment-matching heuristic must +// allow for various conventions around segment naming, including go-foo, +// foo-go, and foo.v3. To handle all of these, matching considers both (1) the +// entire segment, ignoring '-' and '.', as well as (2) the last subsegment +// separated by '-' or '.'. So the segment foo-go matches all of the following +// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII +// identifiers). +// +// See the docstring for [pkgIsCandidate] for an explanation of how this +// heuristic filters potential candidate packages. +func matchesPath(ident, path string) bool { + // Ignore case, for ASCII. + lowerIfASCII := func(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b + } + + // match reports whether path[start:end] matches ident, ignoring [.-]. + match := func(start, end int) bool { + ii := len(ident) - 1 // current byte in ident + pi := end - 1 // current byte in path + for ; pi >= start && ii >= 0; pi-- { + pb := path[pi] + if pb == '-' || pb == '.' { + continue + } + pb = lowerIfASCII(pb) + ib := lowerIfASCII(ident[ii]) + if pb != ib { + return false + } + ii-- + } + return ii < 0 && pi < start // all bytes matched + } + + // segmentEnd and subsegmentEnd hold the end points of the current segment + // and subsegment intervals. + segmentEnd := len(path) + subsegmentEnd := len(path) + + // Count slashes; we only care about the last two segments. 
nslash := 0 - for i := len(v) - 1; i >= 0; i-- { - if v[i] == '/' || v[i] == '\\' { + + for i := len(path) - 1; i >= 0; i-- { + switch b := path[i]; b { + // TODO(rfindley): we handle backlashes here only because the previous + // heuristic handled backslashes. This is perhaps overly defensive, but is + // the result of many lessons regarding Chesterton's fence and the + // goimports codebase. + // + // However, this function is only ever called with something called an + // 'importPath'. Is it possible that this is a real import path, and + // therefore we need only consider forward slashes? + case '/', '\\': + if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) { + return true + } nslash++ if nslash == 2 { - return v[i:] + return false // did not match above } + segmentEnd, subsegmentEnd = i, i // reset + case '-', '.': + if match(i+1, subsegmentEnd) { + return true + } + subsegmentEnd = i } } - return v + return match(0, segmentEnd) || match(0, subsegmentEnd) } type visitFn func(node ast.Node) ast.Visitor diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index f8346552..2215a128 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -47,7 +47,14 @@ type Options struct { // Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { fileSet := token.NewFileSet() - file, adjust, err := parse(fileSet, filename, src, opt) + var parserMode parser.Mode + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment) if err != nil { return nil, err } @@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. -func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { +func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) { ctx, done := event.Start(ctx, "imports.FixImports") defer done() fileSet := token.NewFileSet() - file, _, err := parse(fileSet, filename, src, opt) + // TODO(rfindley): these default values for ParseComments and AllErrors were + // extracted from gopls, but are they even needed? + file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true) if err != nil { return nil, err } - return getFixes(ctx, fileSet, file, filename, opt.Env) + return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source) } // ApplyFixes applies all of the fixes to the file and formats it. extraMode @@ -86,7 +95,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. fileSet := token.NewFileSet() - parserMode := parser.Mode(0) + parserMode := parser.SkipObjectResolution if opt.Comments { parserMode |= parser.ParseComments } @@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // formatted file, and returns the postpocessed result. 
func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { mergeImports(file) - sortImports(opt.LocalPrefix, fset.File(file.Pos()), file) + sortImports(opt.LocalPrefix, fset.File(file.FileStart), file) var spacesBefore []string // import paths we need spaces before for _, impSection := range astutil.Imports(fset, file) { // Within each block of contiguous imports, see if any @@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori // parse parses src, which was read from filename, // as a Go source file or statement list. -func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { - parserMode := parser.Mode(0) - if opt.Comments { - parserMode |= parser.ParseComments - } - if opt.AllErrors { - parserMode |= parser.AllErrors +func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) { + if parserMode&parser.SkipObjectResolution != 0 { + panic("legacy ast.Object resolution is required") } // Try as whole source file. @@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast // If the error is that the source file didn't begin with a // package line and we accept fragmented input, fall through to // try as a source fragment. Stop and return on any other error. - if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + if !fragment || !strings.Contains(err.Error(), "expected 'package'") { return nil, nil, err } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 82fe644a..df94ec81 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -13,6 +13,7 @@ import ( "path" "path/filepath" "regexp" + "slices" "sort" "strconv" "strings" @@ -150,8 +151,8 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe Path: "", Dir: filepath.Join(filepath.Dir(goWork), "vendor"), } - r.modsByModPath = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) - r.modsByDir = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) + r.modsByModPath = append(slices.Clone(mainModsVendor), r.dummyVendorMod) + r.modsByDir = append(slices.Clone(mainModsVendor), r.dummyVendorMod) } } else { // Vendor mode is off, so run go list -m ... to find everything. @@ -245,7 +246,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe // 2. Use this to separate module cache scanning from other scanning. func gomodcacheForEnv(goenv map[string]string) string { if gmc := goenv["GOMODCACHE"]; gmc != "" { - return gmc + // golang/go#67156: ensure that the module cache is clean, since it is + // assumed as a prefix to directories scanned by gopathwalk, which are + // themselves clean. + return filepath.Clean(gmc) } gopaths := filepath.SplitList(goenv["GOPATH"]) if len(gopaths) == 0 { @@ -265,9 +269,7 @@ func (r *ModuleResolver) initAllMods() error { return err } if mod.Dir == "" { - if r.env.Logf != nil { - r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) - } + r.env.logf("module %v has not been downloaded and will be ignored", mod.Path) // Can't do anything with a module that's not downloaded. 
continue } @@ -742,8 +744,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { subdir := "" - if dir != root.Path { - subdir = dir[len(root.Path)+len("/"):] + if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) { + subdir = dir[len(prefix):] } importPath := filepath.ToSlash(subdir) if strings.HasPrefix(importPath, "vendor/") { @@ -766,9 +768,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) if err != nil { - if r.env.Logf != nil { - r.env.Logf("decoding module cache path %q: %v", subdir, err) - } + r.env.logf("decoding module cache path %q: %v", subdir, err) return directoryPackageInfo{ status: directoryScanned, err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index b1192696..b96c9d4b 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -128,7 +128,7 @@ func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener // are going to be. Setting an arbitrary limit makes it much easier. const maxInFlight = 10 sema := make(chan struct{}, maxInFlight) - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { sema <- struct{}{} } @@ -156,7 +156,7 @@ func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener d.mu.Lock() delete(d.listeners, cookie) d.mu.Unlock() - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { <-sema } } diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index da8194fd..67c17bc4 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -11,6 +11,7 @@ import ( "go/ast" "go/token" "log" + "slices" "sort" "strconv" ) @@ -30,7 +31,7 @@ func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { if len(d.Specs) == 0 { // Empty import block, remove it. - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) } if !d.Lparen.IsValid() { @@ -91,7 +92,7 @@ func mergeImports(f *ast.File) { spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() first.Specs = append(first.Specs, spec) } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) i-- } } diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go new file mode 100644 index 00000000..cbe4f3c5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source.go @@ -0,0 +1,63 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import "context" + +// These types document the APIs below. +// +// TODO(rfindley): consider making these defined types rather than aliases. +type ( + ImportPath = string + PackageName = string + Symbol = string + + // References is set of References found in a Go file. The first map key is the + // left hand side of a selector expression, the second key is the right hand + // side, and the value should always be true. 
+ References = map[PackageName]map[Symbol]bool +) + +// A Result satisfies a missing import. +// +// The Import field describes the missing import spec, and the Package field +// summarizes the package exports. +type Result struct { + Import *ImportInfo + Package *PackageInfo +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A PackageInfo represents what's known about a package. +type PackageInfo struct { + Name string // package name in the package declaration, if known + Exports map[string]bool // set of names of known package level sortSymbols +} + +// A Source provides imports to satisfy unresolved references in the file being +// fixed. +type Source interface { + // LoadPackageNames queries PackageName information for the requested import + // paths, when operating from the provided srcDir. + // + // TODO(rfindley): try to refactor to remove this operation. + LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) + + // ResolveReferences asks the Source for the best package name to satisfy + // each of the missing references, in the context of fixing the given + // filename. + // + // Returns a map from package name to a [Result] for that package name that + // provides the required symbols. Keys may be omitted in the map if no + // candidates satisfy all missing references for that package name. It is up + // to each data source to select the best result for each entry in the + // missing map. + ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) +} diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go new file mode 100644 index 00000000..ec996c3c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -0,0 +1,129 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "path/filepath" + "strings" + "sync" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/internal/gopathwalk" +) + +// ProcessEnvSource implements the [Source] interface using the legacy +// [ProcessEnv] abstraction. +type ProcessEnvSource struct { + env *ProcessEnv + srcDir string + filename string + pkgName string +} + +// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given +// env, to be used for fixing imports in the file with name filename in package +// named pkgName. 
+func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + return &ProcessEnvSource{ + env: env, + srcDir: srcDir, + filename: filename, + pkgName: pkgName, + }, nil +} + +func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) { + r, err := s.env.GetResolver() + if err != nil { + return nil, err + } + return r.loadPackageNames(unknown, srcDir) +} + +func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == s.srcDir && s.pkgName == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !CanUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := s.env.GetResolver() + if err != nil { + return nil, err + } + if err := resolver.scan(ctx, callback); err != nil { + return nil, err + } + + g, ctx := errgroup.WithContext(ctx) + + searcher := symbolSearcher{ + logf: s.env.logf, + srcDir: s.srcDir, + xtest: strings.HasSuffix(s.pkgName, "_test"), + loadExports: resolver.loadExports, + } + + var resultMu sync.Mutex + results := make(map[string]*Result, len(refs)) + for pkgName, symbols := range refs { + g.Go(func() error { + found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) + if err != nil { + return err + } + if found == nil { + return nil // No matching package. + } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + pkg := &PackageInfo{ + Name: pkgName, + Exports: symbols, + } + resultMu.Lock() + results[pkgName] = &Result{Import: imp, Package: pkg} + resultMu.Unlock() + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + var ans []*Result + for _, x := range results { + ans = append(ans, x) + } + return ans, nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go new file mode 100644 index 00000000..05229f06 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -0,0 +1,103 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "sync" + "time" + + "golang.org/x/tools/internal/modindex" +) + +// This code is here rather than in the modindex package +// to avoid import loops + +// implements Source using modindex, so only for module cache. +// +// this is perhaps over-engineered. A new Index is read at first use. +// And then Update is called after every 15 minutes, and a new Index +// is read if the index changed. It is not clear the Mutex is needed. 
+type IndexSource struct { + modcachedir string + mutex sync.Mutex + ix *modindex.Index + expires time.Time +} + +// create a new Source. Called from NewView in cache/session.go. +func NewIndexSource(cachedir string) *IndexSource { + return &IndexSource{modcachedir: cachedir} +} + +func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) { + /// This is used by goimports to resolve the package names of imports of the + // current package, which is irrelevant for the module cache. + return nil, nil +} + +func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { + if err := s.maybeReadIndex(); err != nil { + return nil, err + } + var cs []modindex.Candidate + for pkg, nms := range missing { + for nm := range nms { + x := s.ix.Lookup(pkg, nm, false) + cs = append(cs, x...) + } + } + found := make(map[string]*Result) + for _, c := range cs { + var x *Result + if x = found[c.ImportPath]; x == nil { + x = &Result{ + Import: &ImportInfo{ + ImportPath: c.ImportPath, + Name: "", + }, + Package: &PackageInfo{ + Name: c.PkgName, + Exports: make(map[string]bool), + }, + } + found[c.ImportPath] = x + } + x.Package.Exports[c.Name] = true + } + var ans []*Result + for _, x := range found { + ans = append(ans, x) + } + return ans, nil +} + +func (s *IndexSource) maybeReadIndex() error { + s.mutex.Lock() + defer s.mutex.Unlock() + + var readIndex bool + if time.Now().After(s.expires) { + ok, err := modindex.Update(s.modcachedir) + if err != nil { + return err + } + if ok { + readIndex = true + } + } + + if readIndex || s.ix == nil { + ix, err := modindex.ReadIndex(s.modcachedir) + if err != nil { + return err + } + s.ix = ix + // for now refresh every 15 minutes + s.expires = time.Now().Add(time.Minute * 15) + } + + return nil +} diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go new file mode 100644 index 00000000..1e1a02f2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -0,0 +1,135 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modindex + +import ( + "fmt" + "log" + "os" + "path/filepath" + "regexp" + "slices" + "strings" + "sync" + "time" + + "golang.org/x/mod/semver" + "golang.org/x/tools/internal/gopathwalk" +) + +type directory struct { + path Relpath + importPath string + version string // semantic version + syms []symbol +} + +// filterDirs groups the directories by import path, +// sorting the ones with the same import path by semantic version, +// most recent first. 
+func byImportPath(dirs []Relpath) (map[string][]*directory, error) { + ans := make(map[string][]*directory) // key is import path + for _, d := range dirs { + ip, sv, err := DirToImportPathVersion(d) + if err != nil { + return nil, err + } + ans[ip] = append(ans[ip], &directory{ + path: d, + importPath: ip, + version: sv, + }) + } + for k, v := range ans { + semanticSort(v) + ans[k] = v + } + return ans, nil +} + +// sort the directories by semantic version, latest first +func semanticSort(v []*directory) { + slices.SortFunc(v, func(l, r *directory) int { + if n := semver.Compare(l.version, r.version); n != 0 { + return -n // latest first + } + return strings.Compare(string(l.path), string(r.path)) + }) +} + +// modCacheRegexp splits a relpathpath into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +// DirToImportPathVersion computes import path and semantic version +func DirToImportPathVersion(dir Relpath) (string, string, error) { + m := modCacheRegexp.FindStringSubmatch(string(dir)) + // m[1] is the module path + // m[2] is the version major.minor.patch(-
 1 && flds[1][1] == 'D',
+			}
+			if px.Type == Func {
+				n, err := strconv.Atoi(flds[2])
+				if err != nil {
+					continue // should never happen
+				}
+				px.Results = int16(n)
+				if len(flds) >= 4 {
+					sig := strings.Split(flds[3], " ")
+					for i := range sig {
+						// $ cannot otherwise occur. Removing the spaces
+						// almost works, but fails for types like chan struct{}.
+						sig[i] = strings.Replace(sig[i], "$", " ", -1)
+					}
+					px.Sig = toFields(sig)
+				}
+			}
+			ans = append(ans, px)
+		}
+	}
+	return ans
+}
+
+func toFields(sig []string) []Field {
+	ans := make([]Field, len(sig)/2)
+	for i := range ans {
+		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
+	}
+	return ans
+}
+
+// benchmarks show this is measurably better than strings.Split
+// split into first 4 fields separated by single space
+func fastSplit(x string) []string {
+	ans := make([]string, 0, 4)
+	nxt := 0
+	start := 0
+	for i := 0; i < len(x); i++ {
+		if x[i] != ' ' {
+			continue
+		}
+		ans = append(ans, x[start:i])
+		nxt++
+		start = i + 1
+		if nxt >= 3 {
+			break
+		}
+	}
+	ans = append(ans, x[start:])
+	return ans
+}
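+
+// For illustration (a hypothetical stored entry, not taken from a real index):
+// fastSplit("Open F 2 name string") yields ["Open", "F", "2", "name string"],
+// i.e. the symbol name, its LexType letter, the result count, and the rest of
+// the signature. A parameter type that itself contains a space, such as
+// chan struct{}, is stored as chan$struct{} so it survives this split; the
+// caller above undoes the $ substitution before handing the pairs to toFields.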
+
+func asLexType(c byte) LexType {
+	switch c {
+	case 'C':
+		return Const
+	case 'V':
+		return Var
+	case 'T':
+		return Type
+	case 'F':
+		return Func
+	}
+	return -1
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
new file mode 100644
index 00000000..355a53e7
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/modindex.go
@@ -0,0 +1,164 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modindex contains code for building and searching an index to
+// the Go module cache. The directory containing the index, returned by
+// IndexDir(), contains a file index-name-<ver> that contains the name
+// of the current index. We believe writing that short file is atomic.
+// ReadIndex reads that file to get the file name of the index.
+// WriteIndex writes an index with a unique name and then
+// writes that name into a new version of index-name-<ver>.
+// (<ver> stands for the CurrentVersion of the index format.)
+package modindex
+
+import (
+	"path/filepath"
+	"slices"
+	"strings"
+	"time"
+
+	"golang.org/x/mod/semver"
+)
+
+// Create always creates a new index for the go module cache that is in cachedir.
+func Create(cachedir string) error {
+	_, err := indexModCache(cachedir, true)
+	return err
+}
+
+// Update the index for the go module cache that is in cachedir.
+// If there is no existing index it will build one.
+// If there are changed directories since the last index, it will
+// write a new one and return true. Otherwise it returns false.
+func Update(cachedir string) (bool, error) {
+	return indexModCache(cachedir, false)
+}
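+
+// A minimal usage sketch, as called from another package (the cache directory
+// value is hypothetical):
+//
+//	changed, err := modindex.Update("/home/user/go/pkg/mod")
+//	if err == nil && changed {
+//		ix, err := modindex.ReadIndex("/home/user/go/pkg/mod")
+//		...
+//	}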
+
+// indexModCache writes an index current as of when it is called.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and the updates to the cache. It returns true if it wrote an index,
+// false otherwise.
+func indexModCache(cachedir string, clear bool) (bool, error) {
+	cachedir, err := filepath.Abs(cachedir)
+	if err != nil {
+		return false, err
+	}
+	cd := Abspath(cachedir)
+	future := time.Now().Add(24 * time.Hour) // safely in the future
+	ok, err := modindexTimed(future, cd, clear)
+	if err != nil {
+		return false, err
+	}
+	return ok, nil
+}
+
+// modindexTimed writes an index current as of onlyBefore.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and all the updates to the cache before onlyBefore.
+// It returns true if it wrote a new index, false if it wrote nothing.
+func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
+	var curIndex *Index
+	if !clear {
+		var err error
+		curIndex, err = ReadIndex(string(cachedir))
+		if clear && err != nil {
+			return false, err
+		}
+		// TODO(pjw): check that most of those directories still exist
+	}
+	cfg := &work{
+		onlyBefore: onlyBefore,
+		oldIndex:   curIndex,
+		cacheDir:   cachedir,
+	}
+	if curIndex != nil {
+		cfg.onlyAfter = curIndex.Changed
+	}
+	if err := cfg.buildIndex(); err != nil {
+		return false, err
+	}
+	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
+		// no changes from existing curIndex, don't write a new index
+		return false, nil
+	}
+	if err := cfg.writeIndex(); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+type work struct {
+	onlyBefore time.Time // do not use directories later than this
+	onlyAfter  time.Time // only interested in directories after this
+	// directories from before onlyAfter come from oldIndex
+	oldIndex *Index
+	newIndex *Index
+	cacheDir Abspath
+}
+
+func (w *work) buildIndex() error {
+	// The effective date of the new index should be at least
+	// slightly earlier than when the directories are scanned
+	// so set it now.
+	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
+	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
+	if len(dirs) == 0 {
+		return nil
+	}
+	newdirs, err := byImportPath(dirs)
+	if err != nil {
+		return err
+	}
+	// Each import path might occur only in newdirs, only in w.oldIndex,
+	// or in both. If it occurs in both, use the semantically later one.
+	if w.oldIndex != nil {
+		for _, e := range w.oldIndex.Entries {
+			found, ok := newdirs[e.ImportPath]
+			if !ok {
+				w.newIndex.Entries = append(w.newIndex.Entries, e)
+				continue // use this one, there is no new one
+			}
+			if semver.Compare(found[0].version, e.Version) > 0 {
+				// use the new one
+			} else {
+				// use the old one, forget the new one
+				w.newIndex.Entries = append(w.newIndex.Entries, e)
+				delete(newdirs, e.ImportPath)
+			}
+		}
+	}
+	// get symbol information for all the new directories
+	getSymbols(w.cacheDir, newdirs)
+	// assemble the new index entries
+	for k, v := range newdirs {
+		d := v[0]
+		pkg, names := processSyms(d.syms)
+		if pkg == "" {
+			continue // PJW: does this ever happen?
+		}
+		entry := Entry{
+			PkgName:    pkg,
+			Dir:        d.path,
+			ImportPath: k,
+			Version:    d.version,
+			Names:      names,
+		}
+		w.newIndex.Entries = append(w.newIndex.Entries, entry)
+	}
+	// sort the entries in the new index
+	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
+		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+			return n
+		}
+		return strings.Compare(l.ImportPath, r.ImportPath)
+	})
+	return nil
+}
+
+func (w *work) writeIndex() error {
+	return writeIndex(w.cacheDir, w.newIndex)
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
new file mode 100644
index 00000000..b918529d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -0,0 +1,218 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"os"
+	"path/filepath"
+	"runtime"
+	"slices"
+	"strings"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// The name of a symbol contains information about the symbol:
+//  T for types, TD if the type is deprecated
+//  C for consts, CD if the const is deprecated
+//  V for vars, VD if the var is deprecated
+// and for funcs:  F <number of results> (<param name> <param type>)*
+// any spaces in <param type> are replaced by $s so that the fields
+// of the name are space separated. F is replaced by FD if the func
+// is deprecated.
+type symbol struct {
+	pkg  string // name of the symbol's package
+	name string // declared name
+	kind string // T, C, V, or F, followed by D if deprecated
+	sig  string // signature information, for F
+}
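+
+// For example (hypothetical entries), os.Open(name string) (*File, error) would
+// be recorded as {pkg: "os", name: "Open", kind: "F", sig: "2 name string"},
+// and a deprecated exported constant Foo as {pkg: "os", name: "Foo", kind: "CD"};
+// see getFileExports and processSyms below for how these are built and encoded.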
+
+// find the symbols for the best directories
+func getSymbols(cd Abspath, dirs map[string][]*directory) {
+	var g errgroup.Group
+	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
+	for _, vv := range dirs {
+		// throttling some day?
+		d := vv[0]
+		g.Go(func() error {
+			thedir := filepath.Join(string(cd), string(d.path))
+			mode := parser.SkipObjectResolution | parser.ParseComments
+
+			fi, err := os.ReadDir(thedir)
+			if err != nil {
+				return nil // log this someday?
+			}
+			for _, fx := range fi {
+				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+					continue
+				}
+				fname := filepath.Join(thedir, fx.Name())
+				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
+				if err != nil {
+					continue // ignore errors, someday log them?
+				}
+				d.syms = append(d.syms, getFileExports(tr)...)
+			}
+			return nil
+		})
+	}
+	g.Wait()
+}
+
+func getFileExports(f *ast.File) []symbol {
+	pkg := f.Name.Name
+	if pkg == "main" {
+		return nil
+	}
+	var ans []symbol
+	// should we look for //go:build ignore?
+	for _, decl := range f.Decls {
+		switch decl := decl.(type) {
+		case *ast.FuncDecl:
+			if decl.Recv != nil {
+				// ignore methods, as we are completing package selections
+				continue
+			}
+			name := decl.Name.Name
+			dtype := decl.Type
+			// not looking at dtype.TypeParams. That is, treating
+			// generic functions just like non-generic ones.
+			sig := dtype.Params
+			kind := "F"
+			if isDeprecated(decl.Doc) {
+				kind += "D"
+			}
+			result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
+			for _, x := range sig.List {
+				// This code creates a string representing the type.
+				// TODO(pjw): it may be fragile:
+				// 1. x.Type could be nil, perhaps in ill-formed code
+				// 2. ExprString might someday change incompatibly to
+				//    include struct tags, which can be arbitrary strings
+				if x.Type == nil {
+					// Can this happen without a parse error? (Files with parse
+					// errors are ignored in getSymbols)
+					continue // maybe report this someday
+				}
+				tp := types.ExprString(x.Type)
+				if len(tp) == 0 {
+					// Can this happen?
+					continue // maybe report this someday
+				}
+				// This is only safe if ExprString never returns anything with a $
+				// The only place a $ can occur seems to be in a struct tag, which
+				// can be an arbitrary string literal, and ExprString does not presently
+				// print struct tags. So for this to happen the type of a formal parameter
+				// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
+				// would have to show the struct tag. Even testing for this case seems
+				// a waste of effort, but let's remember the possibility
+				if strings.Contains(tp, "$") {
+					continue
+				}
+				tp = strings.Replace(tp, " ", "$", -1)
+				if len(x.Names) == 0 {
+					result = append(result, "_")
+					result = append(result, tp)
+				} else {
+					for _, y := range x.Names {
+						result = append(result, y.Name)
+						result = append(result, tp)
+					}
+				}
+			}
+			sigs := strings.Join(result, " ")
+			if s := newsym(pkg, name, kind, sigs); s != nil {
+				ans = append(ans, *s)
+			}
+		case *ast.GenDecl:
+			depr := isDeprecated(decl.Doc)
+			switch decl.Tok {
+			case token.CONST, token.VAR:
+				tp := "V"
+				if decl.Tok == token.CONST {
+					tp = "C"
+				}
+				if depr {
+					tp += "D"
+				}
+				for _, sp := range decl.Specs {
+					for _, x := range sp.(*ast.ValueSpec).Names {
+						if s := newsym(pkg, x.Name, tp, ""); s != nil {
+							ans = append(ans, *s)
+						}
+					}
+				}
+			case token.TYPE:
+				tp := "T"
+				if depr {
+					tp += "D"
+				}
+				for _, sp := range decl.Specs {
+					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
+						ans = append(ans, *s)
+					}
+				}
+			}
+		}
+	}
+	return ans
+}
+
+func newsym(pkg, name, kind, sig string) *symbol {
+	if len(name) == 0 || !ast.IsExported(name) {
+		return nil
+	}
+	sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
+	return &sym
+}
+
+func isDeprecated(doc *ast.CommentGroup) bool {
+	if doc == nil {
+		return false
+	}
+	// Per go.dev/wiki/Deprecated, a deprecation notice is a paragraph starting 'Deprecated:'.
+	// This code fails for /* Deprecated: */, but it's the code from
+	// gopls/internal/analysis/deprecated
+	lines := strings.Split(doc.Text(), "\n\n")
+	for _, line := range lines {
+		if strings.HasPrefix(line, "Deprecated:") {
+			return true
+		}
+	}
+	return false
+}
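+
+// For example (hypothetical doc text), a comment whose Text() is
+// "Open opens a file.\n\nDeprecated: use OpenFile instead.\n" splits into two
+// paragraphs, and the second starts with "Deprecated:", so isDeprecated
+// reports true.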
+
+// processSyms returns the package name and the encoded name strings for its symbols.
+// If there are multiple packages, one is chosen arbitrarily.
+// The returned slice is sorted lexicographically by symbol name.
+func processSyms(syms []symbol) (string, []string) {
+	if len(syms) == 0 {
+		return "", nil
+	}
+	slices.SortFunc(syms, func(l, r symbol) int {
+		return strings.Compare(l.name, r.name)
+	})
+	pkg := syms[0].pkg
+	var names []string
+	for _, s := range syms {
+		var nx string
+		if s.pkg == pkg {
+			if s.sig != "" {
+				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
+			} else {
+				nx = fmt.Sprintf("%s %s", s.name, s.kind)
+			}
+			names = append(names, nx)
+		} else {
+			continue // PJW: do we want to keep track of these?
+		}
+	}
+	return pkg, names
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
new file mode 100644
index 00000000..ece44886
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/types.go
@@ -0,0 +1,25 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"strings"
+)
+
+// some special types to avoid confusion
+
+// distinguish various types of directory names. It's easy to get confused.
+type Abspath string // absolute paths
+type Relpath string // paths with GOMODCACHE prefix removed
+
+func toRelpath(cachedir Abspath, s string) Relpath {
+	if strings.HasPrefix(s, string(cachedir)) {
+		if s == string(cachedir) {
+			return Relpath("")
+		}
+		return Relpath(s[len(cachedir)+1:])
+	}
+	return Relpath(s)
+}
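+
+// For example (hypothetical paths), with cachedir "/home/user/go/pkg/mod",
+// toRelpath maps "/home/user/go/pkg/mod/golang.org/x/mod@v0.17.0" to
+// Relpath("golang.org/x/mod@v0.17.0") and leaves a string outside the cache
+// directory unchanged.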
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 44719de1..25ebab66 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,8 +5,7 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal
 
-var GetForTest = func(p interface{}) string { return "" }
-var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
+var GetDepsErrors = func(p any) []*PackageError { return nil }
 
 type PackageError struct {
 	ImportStack []string // shortest path from package named on command line to this one
@@ -16,7 +15,6 @@ type PackageError struct {
 
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
-var ForTest int    // must be set as a LoadMode to call GetForTest
 
-var SetModFlag = func(config interface{}, value string) {}
-var SetModFile = func(config interface{}, value string) {}
+var SetModFlag = func(config any, value string) {}
+var SetModFile = func(config any, value string) {}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
index 2acd8585..c0aba26c 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -21,10 +21,7 @@ import (
 // export data.
 type PkgDecoder struct {
 	// version is the file format version.
-	version uint32
-
-	// aliases determines whether types.Aliases should be created
-	aliases bool
+	version Version
 
 	// sync indicates whether the file uses sync markers.
 	sync bool
@@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
 // NewPkgDecoder returns a PkgDecoder initialized to read the Unified
 // IR export data from input. pkgPath is the package path for the
 // compilation unit that produced the export data.
-//
-// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
 func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 	pr := PkgDecoder{
 		pkgPath: pkgPath,
-		//aliases: aliases.Enabled(),
 	}
 
 	// TODO(mdempsky): Implement direct indexing of input string to
@@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 
 	r := strings.NewReader(input)
 
-	assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+	var ver uint32
+	assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
+	pr.version = Version(ver)
 
-	switch pr.version {
-	default:
-		panic(fmt.Errorf("unsupported version: %v", pr.version))
-	case 0:
-		// no flags
-	case 1:
+	if pr.version >= numVersions {
+		panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
+	}
+
+	if pr.version.Has(Flags) {
 		var flags uint32
 		assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
 		pr.sync = flags&flagSyncMarkers != 0
@@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 	assert(err == nil)
 
 	pr.elemData = input[pos:]
-	assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+	const fingerprintSize = 8
+	assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
 
 	return pr
 }
@@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
 		absIdx += int(pr.elemEndsEnds[k-1])
 	}
 	if absIdx >= int(pr.elemEndsEnds[k]) {
-		errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+		panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
 	}
 	return absIdx
 }
@@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
 		Idx:    idx,
 	}
 
-	// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
-	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
-
+	r.Data.Reset(pr.DataIdx(k, idx))
 	r.Sync(SyncRelocs)
 	r.Relocs = make([]RelocEnt, r.Len())
 	for i := range r.Relocs {
@@ -248,7 +243,7 @@ type Decoder struct {
 
 func (r *Decoder) checkErr(err error) {
 	if err != nil {
-		errorf("unexpected decoding error: %w", err)
+		panicf("unexpected decoding error: %w", err)
 	}
 }
 
@@ -264,7 +259,7 @@ func (r *Decoder) rawUvarint() uint64 {
 func readUvarint(r *strings.Reader) (uint64, error) {
 	var x uint64
 	var s uint
-	for i := 0; i < binary.MaxVarintLen64; i++ {
+	for i := range binary.MaxVarintLen64 {
 		b, err := r.ReadByte()
 		if err != nil {
 			if i > 0 && err == io.EOF {
@@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
 
 	return path, name, tag
 }
+
+// Version reports the version of the bitstream.
+func (w *Decoder) Version() Version { return w.common.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
index 6482617a..c17a1239 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -12,18 +12,15 @@ import (
 	"io"
 	"math/big"
 	"runtime"
+	"strings"
 )
 
-// currentVersion is the current version number.
-//
-//   - v0: initial prototype
-//
-//   - v1: adds the flags uint32 word
-const currentVersion uint32 = 1
-
 // A PkgEncoder provides methods for encoding a package's Unified IR
 // export data.
 type PkgEncoder struct {
+	// version of the bitstream.
+	version Version
+
 	// elems holds the bitstream for previously encoded elements.
 	elems [numRelocs][]string
 
@@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
 // export data files, but can help diagnosing desync errors in
 // higher-level Unified IR reader/writer code. If syncFrames is
 // negative, then sync markers are omitted entirely.
-func NewPkgEncoder(syncFrames int) PkgEncoder {
+func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
 	return PkgEncoder{
+		version:    version,
 		stringsIdx: make(map[string]Index),
 		syncFrames: syncFrames,
 	}
@@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
 		assert(binary.Write(out, binary.LittleEndian, x) == nil)
 	}
 
-	writeUint32(currentVersion)
+	writeUint32(uint32(pw.version))
 
-	var flags uint32
-	if pw.SyncMarkers() {
-		flags |= flagSyncMarkers
+	if pw.version.Has(Flags) {
+		var flags uint32
+		if pw.SyncMarkers() {
+			flags |= flagSyncMarkers
+		}
+		writeUint32(flags)
 	}
-	writeUint32(flags)
 
 	// Write elemEndsEnds.
 	var sum uint32
@@ -159,7 +159,7 @@ type Encoder struct {
 
 // Flush finalizes the element's bitstream and returns its Index.
 func (w *Encoder) Flush() Index {
-	var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+	var sb strings.Builder
 
 	// Backup the data so we write the relocations at the front.
 	var tmp bytes.Buffer
@@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
 
 func (w *Encoder) checkErr(err error) {
 	if err != nil {
-		errorf("unexpected encoding error: %v", err)
+		panicf("unexpected encoding error: %v", err)
 	}
 }
 
@@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) {
 // section (if not already present), and then writing a relocation
 // into the element bitstream.
 func (w *Encoder) String(s string) {
+	w.StringRef(w.p.StringIdx(s))
+}
+
+// StringRef writes a reference to the given index, which must be a
+// previously encoded string value.
+func (w *Encoder) StringRef(idx Index) {
 	w.Sync(SyncString)
-	w.Reloc(RelocString, w.p.StringIdx(s))
+	w.Reloc(RelocString, idx)
 }
 
 // Strings encodes and writes a variable-length slice of strings into
@@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
 func (w *Encoder) scalar(val constant.Value) {
 	switch v := constant.Val(val).(type) {
 	default:
-		errorf("unhandled %v (%v)", val, val.Kind())
+		panicf("unhandled %v (%v)", val, val.Kind())
 	case bool:
 		w.Code(ValBool)
 		w.Bool(v)
@@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
 	b := v.Append(nil, 'p', -1)
 	w.String(string(b)) // TODO: More efficient encoding.
 }
+
+// Version reports the version of the bitstream.
+func (w *Encoder) Version() Version { return w.p.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
deleted file mode 100644
index 5294f6a6..00000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.7
-// +build !go1.7
-
-// TODO(mdempsky): Remove after #44505 is resolved
-
-package pkgbits
-
-import "runtime"
-
-func walkFrames(pcs []uintptr, visit frameVisitor) {
-	for _, pc := range pcs {
-		fn := runtime.FuncForPC(pc)
-		file, line := fn.FileLine(pc)
-
-		visit(file, line, fn.Name(), pc-fn.Entry())
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
deleted file mode 100644
index 2324ae7a..00000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.7
-// +build go1.7
-
-package pkgbits
-
-import "runtime"
-
-// walkFrames calls visit for each call frame represented by pcs.
-//
-// pcs should be a slice of PCs, as returned by runtime.Callers.
-func walkFrames(pcs []uintptr, visit frameVisitor) {
-	if len(pcs) == 0 {
-		return
-	}
-
-	frames := runtime.CallersFrames(pcs)
-	for {
-		frame, more := frames.Next()
-		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
-		if !more {
-			return
-		}
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
index ad26d3b2..50534a29 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/support.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -12,6 +12,6 @@ func assert(b bool) {
 	}
 }
 
-func errorf(format string, args ...interface{}) {
+func panicf(format string, args ...any) {
 	panic(fmt.Errorf(format, args...))
 }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
index 5bd51ef7..1520b73a 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -6,6 +6,7 @@ package pkgbits
 
 import (
 	"fmt"
+	"runtime"
 	"strings"
 )
 
@@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string {
 
 type frameVisitor func(file string, line int, name string, offset uintptr)
 
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+	if len(pcs) == 0 {
+		return
+	}
+
+	frames := runtime.CallersFrames(pcs)
+	for {
+		frame, more := frames.Next()
+		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+		if !more {
+			return
+		}
+	}
+}
+
 // SyncMarker is an enum type that represents markers that may be
 // written to export data to ensure the reader and writer stay
 // synchronized.
@@ -110,4 +129,8 @@ const (
 	SyncStmtsEnd
 	SyncLabel
 	SyncOptLabel
+
+	SyncMultiExpr
+	SyncRType
+	SyncConvRTTI
 )
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
index 4a5b0ca5..582ad56d 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -74,11 +74,14 @@ func _() {
 	_ = x[SyncStmtsEnd-64]
 	_ = x[SyncLabel-65]
 	_ = x[SyncOptLabel-66]
+	_ = x[SyncMultiExpr-67]
+	_ = x[SyncRType-68]
+	_ = x[SyncConvRTTI-69]
 }
 
-const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
 
-var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
 
 func (i SyncMarker) String() string {
 	i -= 1
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go
new file mode 100644
index 00000000..53af9df2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// Version indicates a version of a unified IR bitstream.
+// Each Version indicates the addition, removal, or change of
+// data in the bitstream.
+//
+// These are serialized to disk and the interpretation remains fixed.
+type Version uint32
+
+const (
+	// V0: initial prototype.
+	//
+	// All data that is not assigned a Field is in version V0
+	// and has not been deprecated.
+	V0 Version = iota
+
+	// V1: adds the Flags uint32 word
+	V1
+
+	// V2: removes unused legacy fields and supports type parameters for aliases.
+	// - remove the legacy "has init" bool from the public root
+	// - remove obj's "derived func instance" bool
+	// - add a TypeParamNames field to ObjAlias
+	// - remove derived info "needed" bool
+	V2
+
+	numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually like a field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+	// Flags in a uint32 in the header of a bitstream
+	// that is used to indicate whether optional features are enabled.
+	Flags Field = iota
+
+	// Deprecated: HasInit was a bool indicating whether a package
+	// has any init functions.
+	HasInit
+
+	// Deprecated: DerivedFuncInstance was a bool indicating
+	// whether an object was a function instance.
+	DerivedFuncInstance
+
+	// ObjAlias has a list of TypeParamNames.
+	AliasTypeParamNames
+
+	// Deprecated: DerivedInfoNeeded was a bool indicating
+	// whether a type was a derived type.
+	DerivedInfoNeeded
+
+	numFields = iota
+)
+
+// introduced is the version a field was added.
+var introduced = [numFields]Version{
+	Flags:               V1,
+	AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+	HasInit:             V2,
+	DerivedFuncInstance: V2,
+	DerivedInfoNeeded:   V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
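+
+// For example, with the tables above: V0.Has(Flags) is false while
+// V1.Has(Flags) is true, since Flags was introduced in V1 and never removed;
+// V1.Has(HasInit) is true but V2.Has(HasInit) is false, since HasInit was
+// removed in V2.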
diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
new file mode 100644
index 00000000..c50bf406
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -0,0 +1,359 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+type pkginfo struct {
+	name string
+	deps string // list of indices of dependencies, as varint-encoded deltas
+}
+
+var deps = [...]pkginfo{
+	{"archive/tar", "\x03j\x03E6\x01\v\x01\"\x01\x01\x02\x05\n\x02\x01\x02\x02\v"},
+	{"archive/zip", "\x02\x04`\a\x16\x0206\x01*\x05\x01\x11\x03\x02\r\x04"},
+	{"bufio", "\x03j~E\x13"},
+	{"bytes", "m+S\x03\fG\x02\x02"},
+	{"cmp", ""},
+	{"compress/bzip2", "\x02\x02\xe7\x01B"},
+	{"compress/flate", "\x02k\x03{\r\x024\x01\x03"},
+	{"compress/gzip", "\x02\x04`\a\x03\x15fT"},
+	{"compress/lzw", "\x02k\x03{"},
+	{"compress/zlib", "\x02\x04`\a\x03\x13\x01g"},
+	{"container/heap", "\xae\x02"},
+	{"container/list", ""},
+	{"container/ring", ""},
+	{"context", "m\\i\x01\f"},
+	{"crypto", "\x83\x01hD"},
+	{"crypto/aes", "\x10\n\a\x8e\x02"},
+	{"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1c,R"},
+	{"crypto/des", "\x10\x13\x1d-,\x96\x01\x03"},
+	{"crypto/dsa", "@\x04)~\x0e"},
+	{"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1c~"},
+	{"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1c~\x0e\x04K\x01"},
+	{"crypto/ed25519", "\x0e\x1c\x16\n\a\x1c~D"},
+	{"crypto/elliptic", "0=~\x0e9"},
+	{"crypto/fips140", " \x05\x90\x01"},
+	{"crypto/hkdf", "-\x12\x01-\x16"},
+	{"crypto/hmac", "\x1a\x14\x11\x01\x112"},
+	{"crypto/internal/boring", "\x0e\x02\rf"},
+	{"crypto/internal/boring/bbig", "\x1a\xdf\x01L"},
+	{"crypto/internal/boring/bcache", "\xb3\x02\x12"},
+	{"crypto/internal/boring/sig", ""},
+	{"crypto/internal/cryptotest", "\x03\r\n)\x0e\x19\x06\x13\x12#\a\t\x11\x12\x11\x1a\r\r\x05\n"},
+	{"crypto/internal/entropy", "E"},
+	{"crypto/internal/fips140", ">/~8\r\x15"},
+	{"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05*\x8d\x015"},
+	{"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06*\x8b\x01"},
+	{"crypto/internal/fips140/alias", "\xc5\x02"},
+	{"crypto/internal/fips140/bigmod", "%\x17\x01\x06*\x8d\x01"},
+	{"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"},
+	{"crypto/internal/fips140/check/checktest", "%\xfe\x01\""},
+	{"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01(~\x0f8"},
+	{"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f1~\x0f8"},
+	{"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x067~G"},
+	{"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v7\xc2\x01\x03"},
+	{"crypto/internal/fips140/edwards25519", "%\a\f\x041\x8d\x018"},
+	{"crypto/internal/fips140/edwards25519/field", "%\x13\x041\x8d\x01"},
+	{"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x069"},
+	{"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x017"},
+	{"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x041"},
+	{"crypto/internal/fips140/nistec", "%\f\a\x041\x8d\x01)\x0f\x13"},
+	{"crypto/internal/fips140/nistec/fiat", "%\x135\x8d\x01"},
+	{"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x069"},
+	{"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x025~G"},
+	{"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06*\x8d\x01"},
+	{"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x010\x8d\x01K"},
+	{"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06*\x8d\x01"},
+	{"crypto/internal/fips140/ssh", " \x05"},
+	{"crypto/internal/fips140/subtle", "#"},
+	{"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x027"},
+	{"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b1"},
+	{"crypto/internal/fips140deps", ""},
+	{"crypto/internal/fips140deps/byteorder", "\x99\x01"},
+	{"crypto/internal/fips140deps/cpu", "\xad\x01\a"},
+	{"crypto/internal/fips140deps/godebug", "\xb5\x01"},
+	{"crypto/internal/fips140hash", "5\x1a4\xc2\x01"},
+	{"crypto/internal/fips140only", "'\r\x01\x01M26"},
+	{"crypto/internal/fips140test", ""},
+	{"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d#,aM"},
+	{"crypto/internal/impl", "\xb0\x02"},
+	{"crypto/internal/randutil", "\xeb\x01\x12"},
+	{"crypto/internal/sysrand", "mi\"\x1e\r\x0f\x01\x01\v\x06"},
+	{"crypto/internal/sysrand/internal/seccomp", "m"},
+	{"crypto/md5", "\x0e2-\x16\x16a"},
+	{"crypto/mlkem", "/"},
+	{"crypto/pbkdf2", "2\r\x01-\x16"},
+	{"crypto/rand", "\x1a\x06\a\x19\x04\x01(~\x0eL"},
+	{"crypto/rc4", "#\x1d-\xc2\x01"},
+	{"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1c\x03\x1326\r\x01"},
+	{"crypto/sha1", "\x0e\f&-\x16\x16\x14M"},
+	{"crypto/sha256", "\x0e\f\x1aO"},
+	{"crypto/sha3", "\x0e'N\xc2\x01"},
+	{"crypto/sha512", "\x0e\f\x1cM"},
+	{"crypto/subtle", "8\x96\x01U"},
+	{"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x14\b6\x16\x15\r\n\x01\x01\x01\x02\x01\f\x06\x02\x01"},
+	{"crypto/tls/internal/fips140tls", " \x93\x02"},
+	{"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x033\x01\x02\t\x01\x01\x01\a\x0f\x05\x01\x06\x02\x05\f\x01\x02\r\x02\x01\x01\x02\x03\x01"},
+	{"crypto/x509/pkix", "c\x06\a\x89\x01F"},
+	{"database/sql", "\x03\nJ\x16\x03{\f\x06!\x05\n\x02\x03\x01\f\x02\x02\x02"},
+	{"database/sql/driver", "\r`\x03\xae\x01\x11\x10"},
+	{"debug/buildinfo", "\x03W\x02\x01\x01\b\a\x03`\x19\x02\x01*\x0f "},
+	{"debug/dwarf", "\x03c\a\x03{0\x13\x01\x01"},
+	{"debug/elf", "\x03\x06P\r\a\x03`\x1a\x01+\x19\x01\x15"},
+	{"debug/gosym", "\x03c\n\xbe\x01\x01\x01\x02"},
+	{"debug/macho", "\x03\x06P\r\n`\x1b+\x19\x01"},
+	{"debug/pe", "\x03\x06P\r\a\x03`\x1b+\x19\x01\x15"},
+	{"debug/plan9obj", "f\a\x03`\x1b+"},
+	{"embed", "m+:\x19\x01S"},
+	{"embed/internal/embedtest", ""},
+	{"encoding", ""},
+	{"encoding/ascii85", "\xeb\x01D"},
+	{"encoding/asn1", "\x03j\x03\x88\x01\x01%\x0f\x02\x01\x0f\x03\x01"},
+	{"encoding/base32", "\xeb\x01B\x02"},
+	{"encoding/base64", "f\x85\x01B\x02"},
+	{"encoding/binary", "m~\r&\x0f\x05"},
+	{"encoding/csv", "\x02\x01j\x03{E\x11\x02"},
+	{"encoding/gob", "\x02_\x05\a\x03`\x1b\f\x01\x02\x1c\b\x14\x01\x0e\x02"},
+	{"encoding/hex", "m\x03{B\x03"},
+	{"encoding/json", "\x03\x01]\x04\b\x03{\r&\x0f\x02\x01\x02\x0f\x01\x01\x02"},
+	{"encoding/pem", "\x03b\b~B\x03"},
+	{"encoding/xml", "\x02\x01^\f\x03{3\x05\f\x01\x02\x0f\x02"},
+	{"errors", "\xc9\x01|"},
+	{"expvar", "jK:\t\n\x14\r\n\x02\x03\x01\x10"},
+	{"flag", "a\f\x03{+\b\x05\n\x02\x01\x0f"},
+	{"fmt", "mE9\r\x1e\b\x0f\x02\x03\x11"},
+	{"go/ast", "\x03\x01l\x0f\x01k\x03(\b\x0f\x02\x01"},
+	{"go/ast/internal/tests", ""},
+	{"go/build", "\x02\x01j\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x13\x01*\x01\x04\x01\a\n\x02\x01\x11\x02\x02"},
+	{"go/build/constraint", "m\xc2\x01\x01\x11\x02"},
+	{"go/constant", "p\x10x\x01\x015\x01\x02\x11"},
+	{"go/doc", "\x04l\x01\x06\t=.0\x12\x02\x01\x11\x02"},
+	{"go/doc/comment", "\x03m\xbd\x01\x01\x01\x01\x11\x02"},
+	{"go/format", "\x03m\x01\f\x01\x02kE"},
+	{"go/importer", "s\a\x01\x01\x04\x01j8"},
+	{"go/internal/gccgoimporter", "\x02\x01W\x13\x03\x05\v\x01h\x02+\x01\x05\x13\x01\v\b"},
+	{"go/internal/gcimporter", "\x02n\x10\x01/\x05\x0e(+\x17\x03\x02"},
+	{"go/internal/srcimporter", "p\x01\x02\n\x03\x01j+\x01\x05\x14\x02\x13"},
+	{"go/parser", "\x03j\x03\x01\x03\v\x01k\x01*\x06\x14"},
+	{"go/printer", "p\x01\x03\x03\tk\r\x1e\x17\x02\x01\x02\n\x05\x02"},
+	{"go/scanner", "\x03m\x10k1\x12\x01\x12\x02"},
+	{"go/token", "\x04l\xbd\x01\x02\x03\x01\x0e\x02"},
+	{"go/types", "\x03\x01\x06c\x03\x01\x04\b\x03\x02\x15\x1e\x06,\x04\x03\n$\a\n\x01\x01\x01\x02\x01\x0e\x02\x02"},
+	{"go/version", "\xba\x01v"},
+	{"hash", "\xeb\x01"},
+	{"hash/adler32", "m\x16\x16"},
+	{"hash/crc32", "m\x16\x16\x14\x85\x01\x01\x12"},
+	{"hash/crc64", "m\x16\x16\x99\x01"},
+	{"hash/fnv", "m\x16\x16a"},
+	{"hash/maphash", "\x94\x01\x05\x1b\x03AM"},
+	{"html", "\xb0\x02\x02\x11"},
+	{"html/template", "\x03g\x06\x19,6\x01\v\x1f\x05\x01\x02\x03\x0e\x01\x02\v\x01\x03\x02"},
+	{"image", "\x02k\x1f_\x0f5\x03\x01"},
+	{"image/color", ""},
+	{"image/color/palette", "\x8c\x01"},
+	{"image/draw", "\x8b\x01\x01\x04"},
+	{"image/gif", "\x02\x01\x05e\x03\x1b\x01\x01\x01\vR"},
+	{"image/internal/imageutil", "\x8b\x01"},
+	{"image/jpeg", "\x02k\x1e\x01\x04["},
+	{"image/png", "\x02\a]\n\x13\x02\x06\x01_D"},
+	{"index/suffixarray", "\x03c\a~\r)\f\x01"},
+	{"internal/abi", "\xb4\x01\x91\x01"},
+	{"internal/asan", "\xc5\x02"},
+	{"internal/bisect", "\xa3\x02\x0f\x01"},
+	{"internal/buildcfg", "pG_\x06\x02\x05\f\x01"},
+	{"internal/bytealg", "\xad\x01\x98\x01"},
+	{"internal/byteorder", ""},
+	{"internal/cfg", ""},
+	{"internal/chacha8rand", "\x99\x01\x1b\x91\x01"},
+	{"internal/copyright", ""},
+	{"internal/coverage", ""},
+	{"internal/coverage/calloc", ""},
+	{"internal/coverage/cfile", "j\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x01 +\x06\a\f\x01\x03\f\x06"},
+	{"internal/coverage/cformat", "\x04l-\x04J\f6\x01\x02\f"},
+	{"internal/coverage/cmerge", "p-["},
+	{"internal/coverage/decodecounter", "f\n-\v\x02A+\x19\x16"},
+	{"internal/coverage/decodemeta", "\x02d\n\x17\x16\v\x02A+"},
+	{"internal/coverage/encodecounter", "\x02d\n-\f\x01\x02?\f\x1f\x17"},
+	{"internal/coverage/encodemeta", "\x02\x01c\n\x13\x04\x16\r\x02?+/"},
+	{"internal/coverage/pods", "\x04l-y\x06\x05\f\x02\x01"},
+	{"internal/coverage/rtcov", "\xc5\x02"},
+	{"internal/coverage/slicereader", "f\n{Z"},
+	{"internal/coverage/slicewriter", "p{"},
+	{"internal/coverage/stringtab", "p8\x04?"},
+	{"internal/coverage/test", ""},
+	{"internal/coverage/uleb128", ""},
+	{"internal/cpu", "\xc5\x02"},
+	{"internal/dag", "\x04l\xbd\x01\x03"},
+	{"internal/diff", "\x03m\xbe\x01\x02"},
+	{"internal/exportdata", "\x02\x01j\x03\x03]\x1b+\x01\x05\x13\x01\x02"},
+	{"internal/filepathlite", "m+:\x1aA"},
+	{"internal/fmtsort", "\x04\x9a\x02\x0f"},
+	{"internal/fuzz", "\x03\nA\x18\x04\x03\x03\x01\f\x0356\r\x02\x1c\x01\x05\x02\x05\f\x01\x02\x01\x01\v\x04\x02"},
+	{"internal/goarch", ""},
+	{"internal/godebug", "\x96\x01 |\x01\x12"},
+	{"internal/godebugs", ""},
+	{"internal/goexperiment", ""},
+	{"internal/goos", ""},
+	{"internal/goroot", "\x96\x02\x01\x05\x14\x02"},
+	{"internal/gover", "\x04"},
+	{"internal/goversion", ""},
+	{"internal/itoa", ""},
+	{"internal/lazyregexp", "\x96\x02\v\x0f\x02"},
+	{"internal/lazytemplate", "\xeb\x01+\x1a\x02\v"},
+	{"internal/msan", "\xc5\x02"},
+	{"internal/nettrace", ""},
+	{"internal/obscuretestdata", "e\x86\x01+"},
+	{"internal/oserror", "m"},
+	{"internal/pkgbits", "\x03K\x18\a\x03\x05\vk\x0e\x1d\r\f\x01"},
+	{"internal/platform", ""},
+	{"internal/poll", "mO\x1a\x158\x0f\x01\x01\v\x06"},
+	{"internal/profile", "\x03\x04f\x03{6\r\x01\x01\x0f"},
+	{"internal/profilerecord", ""},
+	{"internal/race", "\x94\x01\xb1\x01"},
+	{"internal/reflectlite", "\x94\x01 4;\""},
+	{"internal/runtime/atomic", "\xc5\x02"},
+	{"internal/runtime/exithook", "\xca\x01{"},
+	{"internal/runtime/maps", "\x94\x01\x01\x1f\v\t\x05\x01w"},
+	{"internal/runtime/math", "\xb4\x01"},
+	{"internal/runtime/sys", "\xb4\x01\x04"},
+	{"internal/runtime/syscall", "\xc5\x02"},
+	{"internal/saferio", "\xeb\x01Z"},
+	{"internal/singleflight", "\xb2\x02"},
+	{"internal/stringslite", "\x98\x01\xad\x01"},
+	{"internal/sync", "\x94\x01 \x14k\x12"},
+	{"internal/synctest", "\xc5\x02"},
+	{"internal/syscall/execenv", "\xb4\x02"},
+	{"internal/syscall/unix", "\xa3\x02\x10\x01\x11"},
+	{"internal/sysinfo", "\x02\x01\xaa\x01>+\x1a\x02"},
+	{"internal/syslist", ""},
+	{"internal/testenv", "\x03\n`\x02\x01*\x1a\x10(*\x01\x05\a\f\x01\x02\x02\x01\n"},
+	{"internal/testlog", "\xb2\x02\x01\x12"},
+	{"internal/testpty", "m\x03\xa6\x01"},
+	{"internal/trace", "\x02\x01\x01\x06\\\a\x03m\x01\x01\x06\x06\x03\n5\x01\x02\x0f"},
+	{"internal/trace/event", ""},
+	{"internal/trace/event/go122", "pm"},
+	{"internal/trace/internal/oldtrace", "\x03\x01b\a\x03m\b\x06\r5\x01"},
+	{"internal/trace/internal/testgen/go122", "\x03c\nl\x01\x01\x03\x04\x010\v\x0f"},
+	{"internal/trace/raw", "\x02d\nm\b\x06D\x01\x11"},
+	{"internal/trace/testtrace", "\x02\x01j\x03l\x05\x05\x056\f\x02\x01"},
+	{"internal/trace/traceviewer", "\x02]\v\x06\x1a<\x16\b\a\x04\t\n\x14\x01\x05\a\f\x01\x02\r"},
+	{"internal/trace/traceviewer/format", ""},
+	{"internal/trace/version", "pm\x01\r"},
+	{"internal/txtar", "\x03m\xa6\x01\x1a"},
+	{"internal/types/errors", "\xaf\x02"},
+	{"internal/unsafeheader", "\xc5\x02"},
+	{"internal/xcoff", "Y\r\a\x03`\x1b+\x19\x01"},
+	{"internal/zstd", "f\a\x03{\x0f"},
+	{"io", "m\xc5\x01"},
+	{"io/fs", "m+*)0\x12\x12\x04"},
+	{"io/ioutil", "\xeb\x01\x01*\x17\x03"},
+	{"iter", "\xc8\x01[\""},
+	{"log", "p{\x05&\r\x0f\x01\f"},
+	{"log/internal", ""},
+	{"log/slog", "\x03\nT\t\x03\x03{\x04\x01\x02\x02\x04&\x05\n\x02\x01\x02\x01\f\x02\x02\x02"},
+	{"log/slog/internal", ""},
+	{"log/slog/internal/benchmarks", "\r`\x03{\x06\x03;\x10"},
+	{"log/slog/internal/buffer", "\xb2\x02"},
+	{"log/slog/internal/slogtest", "\xf1\x01"},
+	{"log/syslog", "m\x03\x7f\x12\x15\x1a\x02\r"},
+	{"maps", "\xee\x01W"},
+	{"math", "\xad\x01MK"},
+	{"math/big", "\x03j\x03)\x14>\r\x02\x023\x01\x02\x13"},
+	{"math/bits", "\xc5\x02"},
+	{"math/cmplx", "\xf8\x01\x02"},
+	{"math/rand", "\xb5\x01C:\x01\x12"},
+	{"math/rand/v2", "m,\x02]\x02K"},
+	{"mime", "\x02\x01b\b\x03{\f\x1f\x17\x03\x02\x0f\x02"},
+	{"mime/multipart", "\x02\x01G#\x03E6\f\x01\x06\x02\x14\x02\x06\x11\x02\x01\x15"},
+	{"mime/quotedprintable", "\x02\x01m{"},
+	{"net", "\x04\t`+\x1d\a\x04\x05\f\x01\x04\x15\x01$\x06\r\n\x05\x01\x01\v\x06\a"},
+	{"net/http", "\x02\x01\x04\x04\x02=\b\x13\x01\a\x03E6\x01\x03\b\x01\x02\x02\x02\x01\x02\x06\x02\x01\n\x01\x01\x05\x01\x02\x05\n\x01\x01\x01\x02\x01\f\x02\x02\x02\b\x01\x01\x01"},
+	{"net/http/cgi", "\x02P\x1b\x03{\x04\b\n\x01\x12\x01\x01\x01\x04\x01\x05\x02\n\x02\x01\x0f\x0e"},
+	{"net/http/cookiejar", "\x04i\x03\x91\x01\x01\b\v\x18\x03\x02\r\x04"},
+	{"net/http/fcgi", "\x02\x01\nY\a\x03{\x16\x01\x01\x13\x1a\x02\r"},
+	{"net/http/httptest", "\x02\x01\nE\x02\x1b\x01{\x04\x12\x01\t\t\x02\x19\x01\x02\r\x0e"},
+	{"net/http/httptrace", "\rEnA\x13\n!"},
+	{"net/http/httputil", "\x02\x01\n`\x03{\x04\x0f\x03\x01\x05\x02\x01\n\x01\x1b\x02\r\x0e"},
+	{"net/http/internal", "\x02\x01j\x03{"},
+	{"net/http/internal/ascii", "\xb0\x02\x11"},
+	{"net/http/internal/testcert", "\xb0\x02"},
+	{"net/http/pprof", "\x02\x01\nc\x19,\x11%\x04\x13\x13\x01\r\x06\x03\x01\x02\x01\x0f"},
+	{"net/internal/cgotest", ""},
+	{"net/internal/socktest", "p\xc2\x01\x02"},
+	{"net/mail", "\x02k\x03{\x04\x0f\x03\x13\x1c\x02\r\x04"},
+	{"net/netip", "\x04i+\x01#<\x025\x15"},
+	{"net/rpc", "\x02f\x05\x03\x10\na\x04\x12\x01\x1c\x0f\x03\x02"},
+	{"net/rpc/jsonrpc", "j\x03\x03{\x16\x10!"},
+	{"net/smtp", "\x19.\v\x13\b\x03{\x16\x13\x1c"},
+	{"net/textproto", "\x02\x01j\x03{\r\t.\x01\x02\x13"},
+	{"net/url", "m\x03\x87\x01$\x12\x02\x01\x15"},
+	{"os", "m+\x01\x18\x03\b\t\r\x03\x01\x04\x11\x017\n\x05\x01\x01\v\x06"},
+	{"os/exec", "\x03\n`H \x01\x15\x01*\x06\a\f\x01\x04\v"},
+	{"os/exec/internal/fdtest", "\xb4\x02"},
+	{"os/signal", "\r\x89\x02\x17\x05\x02"},
+	{"os/user", "\x02\x01j\x03{+\r\f\x01\x02"},
+	{"path", "m+\xab\x01"},
+	{"path/filepath", "m+\x19;*\r\n\x03\x04\x0f"},
+	{"plugin", "m"},
+	{"reflect", "m'\x04\x1c\b\f\x04\x02\x1a\x06\n+\f\x03\x0f\x02\x02"},
+	{"reflect/internal/example1", ""},
+	{"reflect/internal/example2", ""},
+	{"regexp", "\x03\xe8\x017\v\x02\x01\x02\x0f\x02"},
+	{"regexp/syntax", "\xad\x02\x01\x01\x01\x11\x02"},
+	{"runtime", "\x94\x01\x04\x01\x02\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x03s"},
+	{"runtime/coverage", "\x9f\x01L"},
+	{"runtime/debug", "pUQ\r\n\x02\x01\x0f\x06"},
+	{"runtime/internal/startlinetest", ""},
+	{"runtime/internal/wasitest", ""},
+	{"runtime/metrics", "\xb6\x01B+\""},
+	{"runtime/pprof", "\x02\x01\x01\x03\x06Y\a\x03$3$\r\x1e\r\n\x01\x01\x01\x02\x02\b\x03\x06"},
+	{"runtime/race", "\xab\x02"},
+	{"runtime/race/internal/amd64v1", ""},
+	{"runtime/trace", "\rc{8\x0f\x01\x12"},
+	{"slices", "\x04\xea\x01\fK"},
+	{"sort", "\xc9\x0113"},
+	{"strconv", "m+:&\x02I"},
+	{"strings", "m'\x04:\x19\x03\f8\x0f\x02\x02"},
+	{"structs", ""},
+	{"sync", "\xc8\x01\vP\x10\x12"},
+	{"sync/atomic", "\xc5\x02"},
+	{"syscall", "m(\x03\x01\x1b\b\x03\x03\x06\aT\x0f\x01\x12"},
+	{"testing", "\x03\n`\x02\x01G\x11\x0f\x14\r\x04\x1a\x06\x02\x05\x02\a\x01\x02\x01\x02\x01\f\x02\x02\x02"},
+	{"testing/fstest", "m\x03{\x01\v$\x12\x03\b\a"},
+	{"testing/internal/testdeps", "\x02\v\xa6\x01'\x11+\x03\x05\x03\b\a\x02\r"},
+	{"testing/iotest", "\x03j\x03{\x04"},
+	{"testing/quick", "o\x01\x88\x01\x04\"\x12\x0f"},
+	{"testing/slogtest", "\r`\x03\x81\x01-\x05\x12\n"},
+	{"text/scanner", "\x03m{++\x02"},
+	{"text/tabwriter", "p{X"},
+	{"text/template", "m\x03B9\x01\v\x1e\x01\x05\x01\x02\x05\r\x02\f\x03\x02"},
+	{"text/template/parse", "\x03m\xb3\x01\f\x01\x11\x02"},
+	{"time", "m+\x1d\x1d()\x0f\x02\x11"},
+	{"time/tzdata", "m\xc7\x01\x11"},
+	{"unicode", ""},
+	{"unicode/utf16", ""},
+	{"unicode/utf8", ""},
+	{"unique", "\x94\x01>\x01P\x0f\x13\x12"},
+	{"unsafe", ""},
+	{"vendor/golang.org/x/crypto/chacha20", "\x10V\a\x8d\x01)'"},
+	{"vendor/golang.org/x/crypto/chacha20poly1305", "\x10V\a\xd9\x01\x04\x01\a"},
+	{"vendor/golang.org/x/crypto/cryptobyte", "c\n\x03\x89\x01%!\n"},
+	{"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
+	{"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"},
+	{"vendor/golang.org/x/crypto/internal/poly1305", "Q\x15\x94\x01"},
+	{"vendor/golang.org/x/net/dns/dnsmessage", "m"},
+	{"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x13\x1c\x13\r"},
+	{"vendor/golang.org/x/net/http/httpproxy", "m\x03\x91\x01\x0f\x05\x01\x1a\x13\r"},
+	{"vendor/golang.org/x/net/http2/hpack", "\x03j\x03{G"},
+	{"vendor/golang.org/x/net/idna", "p\x88\x018\x13\x10\x02\x01"},
+	{"vendor/golang.org/x/net/nettest", "\x03c\a\x03{\x11\x05\x15\x01\f\f\x01\x02\x02\x01\n"},
+	{"vendor/golang.org/x/sys/cpu", "\x96\x02\r\f\x01\x15"},
+	{"vendor/golang.org/x/text/secure/bidirule", "m\xd6\x01\x11\x01"},
+	{"vendor/golang.org/x/text/transform", "\x03j~X"},
+	{"vendor/golang.org/x/text/unicode/bidi", "\x03\be\x7f?\x15"},
+	{"vendor/golang.org/x/text/unicode/norm", "f\n{G\x11\x11"},
+	{"weak", "\x94\x01\x8f\x01\""},
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go
new file mode 100644
index 00000000..f6909878
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/import.go
@@ -0,0 +1,89 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stdlib
+
+// This file provides the API for the import graph of the standard library.
+//
+// Be aware that the compiler-generated code for every package
+// implicitly depends on package "runtime" and a handful of others
+// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).
+
+import (
+	"encoding/binary"
+	"iter"
+	"slices"
+	"strings"
+)
+
+// Imports returns the sequence of packages directly imported by the
+// named standard packages, in name order.
+// The imports of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Imports(pkgs ...string) iter.Seq[string] {
+	return func(yield func(string) bool) {
+		for _, pkg := range pkgs {
+			if i, ok := find(pkg); ok {
+				var depIndex uint64
+				for data := []byte(deps[i].deps); len(data) > 0; {
+					delta, n := binary.Uvarint(data)
+					depIndex += delta
+					if !yield(deps[depIndex].name) {
+						return
+					}
+					data = data[n:]
+				}
+			}
+		}
+	}
+}
+
+// Dependencies returns the set of all dependencies of the named
+// standard packages, including the initial package,
+// in a deterministic topological order.
+// The dependencies of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Dependencies(pkgs ...string) iter.Seq[string] {
+	return func(yield func(string) bool) {
+		for _, pkg := range pkgs {
+			if i, ok := find(pkg); ok {
+				var seen [1 + len(deps)/8]byte // bit set of seen packages
+				var visit func(i int) bool
+				visit = func(i int) bool {
+					bit := byte(1) << (i % 8)
+					if seen[i/8]&bit == 0 {
+						seen[i/8] |= bit
+						var depIndex uint64
+						for data := []byte(deps[i].deps); len(data) > 0; {
+							delta, n := binary.Uvarint(data)
+							depIndex += delta
+							if !visit(int(depIndex)) {
+								return false
+							}
+							data = data[n:]
+						}
+						if !yield(deps[i].name) {
+							return false
+						}
+					}
+					return true
+				}
+				if !visit(i) {
+					return
+				}
+			}
+		}
+	}
+}
+
+// find returns the index of pkg in the deps table.
+func find(pkg string) (int, bool) {
+	return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
+		return strings.Compare(p.name, n)
+	})
+}
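Aside (illustrative, not part of the vendored patch): the new import.go exposes the std-library import graph through two Go 1.23-style iterators, Imports and Dependencies, both returning iter.Seq[string]. A minimal usage sketch follows, assuming the package were importable — it lives under internal/ in golang.org/x/tools, so only that module can actually import it; the snippet exists only to show the shape of the API added above.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

func main() {
	// Direct imports of net/http, in name order (decoded from the deps table).
	for dep := range stdlib.Imports("net/http") {
		fmt.Println("import:", dep)
	}

	// Transitive dependency closure of os/exec, in a deterministic topological order.
	for dep := range stdlib.Dependencies("os/exec") {
		fmt.Println("dep:", dep)
	}
}

As the implementation above shows, each package's dependency list is stored as delta-encoded uvarint indexes into the deps table: Imports walks one entry with binary.Uvarint, while Dependencies runs a depth-first traversal over the same table, using a small bit set to skip packages it has already yielded.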
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index fd689207..2b418796 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -23,6 +23,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ErrWriteAfterClose", Var, 0},
 		{"ErrWriteTooLong", Var, 0},
 		{"FileInfoHeader", Func, 1},
+		{"FileInfoNames", Type, 23},
 		{"Format", Type, 10},
 		{"FormatGNU", Const, 10},
 		{"FormatPAX", Const, 10},
@@ -267,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"ErrTooLarge", Var, 0},
 		{"Fields", Func, 0},
 		{"FieldsFunc", Func, 0},
+		{"FieldsFuncSeq", Func, 24},
+		{"FieldsSeq", Func, 24},
 		{"HasPrefix", Func, 0},
 		{"HasSuffix", Func, 0},
 		{"Index", Func, 0},
@@ -279,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"LastIndexAny", Func, 0},
 		{"LastIndexByte", Func, 5},
 		{"LastIndexFunc", Func, 0},
+		{"Lines", Func, 24},
 		{"Map", Func, 0},
 		{"MinRead", Const, 0},
 		{"NewBuffer", Func, 0},
@@ -292,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Split", Func, 0},
 		{"SplitAfter", Func, 0},
 		{"SplitAfterN", Func, 0},
+		{"SplitAfterSeq", Func, 24},
 		{"SplitN", Func, 0},
+		{"SplitSeq", Func, 24},
 		{"Title", Func, 0},
 		{"ToLower", Func, 0},
 		{"ToLowerSpecial", Func, 0},
@@ -534,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"NewCTR", Func, 0},
 		{"NewGCM", Func, 2},
 		{"NewGCMWithNonceSize", Func, 5},
+		{"NewGCMWithRandomNonce", Func, 24},
 		{"NewGCMWithTagSize", Func, 11},
 		{"NewOFB", Func, 0},
 		{"Stream", Type, 0},
@@ -672,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{
 		{"Unmarshal", Func, 0},
 		{"UnmarshalCompressed", Func, 15},
 	},
+	"crypto/fips140": {
+		{"Enabled", Func, 24},
+	},
+	"crypto/hkdf": {
+		{"Expand", Func, 24},
+		{"Extract", Func, 24},
+		{"Key", Func, 24},
+	},
 	"crypto/hmac": {
 		{"Equal", Func, 1},
 		{"New", Func, 0},
@@ -682,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{
 		{"Size", Const, 0},
 		{"Sum", Func, 2},
 	},
+	"crypto/mlkem": {
+		{"(*DecapsulationKey1024).Bytes", Method, 24},
+		{"(*DecapsulationKey1024).Decapsulate", Method, 24},
+		{"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
+		{"(*DecapsulationKey768).Bytes", Method, 24},
+		{"(*DecapsulationKey768).Decapsulate", Method, 24},
+		{"(*DecapsulationKey768).EncapsulationKey", Method, 24},
+		{"(*EncapsulationKey1024).Bytes", Method, 24},
+		{"(*EncapsulationKey1024).Encapsulate", Method, 24},
+		{"(*EncapsulationKey768).Bytes", Method, 24},
+		{"(*EncapsulationKey768).Encapsulate", Method, 24},
+		{"CiphertextSize1024", Const, 24},
+		{"CiphertextSize768", Const, 24},
+		{"DecapsulationKey1024", Type, 24},
+		{"DecapsulationKey768", Type, 24},
+		{"EncapsulationKey1024", Type, 24},
+		{"EncapsulationKey768", Type, 24},
+		{"EncapsulationKeySize1024", Const, 24},
+		{"EncapsulationKeySize768", Const, 24},
+		{"GenerateKey1024", Func, 24},
+		{"GenerateKey768", Func, 24},
+		{"NewDecapsulationKey1024", Func, 24},
+		{"NewDecapsulationKey768", Func, 24},
+		{"NewEncapsulationKey1024", Func, 24},
+		{"NewEncapsulationKey768", Func, 24},
+		{"SeedSize", Const, 24},
+		{"SharedKeySize", Const, 24},
+	},
+	"crypto/pbkdf2": {
+		{"Key", Func, 24},
+	},
 	"crypto/rand": {
 		{"Int", Func, 0},
 		{"Prime", Func, 0},
 		{"Read", Func, 0},
 		{"Reader", Var, 0},
+		{"Text", Func, 24},
 	},
 	"crypto/rc4": {
 		{"(*Cipher).Reset", Method, 0},
@@ -765,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{
 		{"Sum224", Func, 2},
 		{"Sum256", Func, 2},
 	},
+	"crypto/sha3": {
+		{"(*SHA3).AppendBinary", Method, 24},
+		{"(*SHA3).BlockSize", Method, 24},
+		{"(*SHA3).MarshalBinary", Method, 24},
+		{"(*SHA3).Reset", Method, 24},
+		{"(*SHA3).Size", Method, 24},
+		{"(*SHA3).Sum", Method, 24},
+		{"(*SHA3).UnmarshalBinary", Method, 24},
+		{"(*SHA3).Write", Method, 24},
+		{"(*SHAKE).AppendBinary", Method, 24},
+		{"(*SHAKE).BlockSize", Method, 24},
+		{"(*SHAKE).MarshalBinary", Method, 24},
+		{"(*SHAKE).Read", Method, 24},
+		{"(*SHAKE).Reset", Method, 24},
+		{"(*SHAKE).UnmarshalBinary", Method, 24},
+		{"(*SHAKE).Write", Method, 24},
+		{"New224", Func, 24},
+		{"New256", Func, 24},
+		{"New384", Func, 24},
+		{"New512", Func, 24},
+		{"NewCSHAKE128", Func, 24},
+		{"NewCSHAKE256", Func, 24},
+		{"NewSHAKE128", Func, 24},
+		{"NewSHAKE256", Func, 24},
+		{"SHA3", Type, 24},
+		{"SHAKE", Type, 24},
+		{"Sum224", Func, 24},
+		{"Sum256", Func, 24},
+		{"Sum384", Func, 24},
+		{"Sum512", Func, 24},
+		{"SumSHAKE128", Func, 24},
+		{"SumSHAKE256", Func, 24},
+	},
 	"crypto/sha512": {
 		{"BlockSize", Const, 0},
 		{"New", Func, 0},
@@ -787,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ConstantTimeEq", Func, 0},
 		{"ConstantTimeLessOrEq", Func, 2},
 		{"ConstantTimeSelect", Func, 0},
+		{"WithDataIndependentTiming", Func, 24},
 		{"XORBytes", Func, 20},
 	},
 	"crypto/tls": {
@@ -820,6 +901,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
 		{"(*Dialer).Dial", Method, 15},
 		{"(*Dialer).DialContext", Method, 15},
+		{"(*ECHRejectionError).Error", Method, 23},
 		{"(*QUICConn).Close", Method, 21},
 		{"(*QUICConn).ConnectionState", Method, 21},
 		{"(*QUICConn).HandleData", Method, 21},
@@ -827,6 +909,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*QUICConn).SendSessionTicket", Method, 21},
 		{"(*QUICConn).SetTransportParameters", Method, 21},
 		{"(*QUICConn).Start", Method, 21},
+		{"(*QUICConn).StoreSession", Method, 23},
 		{"(*SessionState).Bytes", Method, 21},
 		{"(AlertError).Error", Method, 21},
 		{"(ClientAuthType).String", Method, 15},
@@ -861,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ClientHelloInfo", Type, 4},
 		{"ClientHelloInfo.CipherSuites", Field, 4},
 		{"ClientHelloInfo.Conn", Field, 8},
+		{"ClientHelloInfo.Extensions", Field, 24},
 		{"ClientHelloInfo.ServerName", Field, 4},
 		{"ClientHelloInfo.SignatureSchemes", Field, 8},
 		{"ClientHelloInfo.SupportedCurves", Field, 4},
@@ -877,6 +961,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Config.ClientSessionCache", Field, 3},
 		{"Config.CurvePreferences", Field, 3},
 		{"Config.DynamicRecordSizingDisabled", Field, 7},
+		{"Config.EncryptedClientHelloConfigList", Field, 23},
+		{"Config.EncryptedClientHelloKeys", Field, 24},
+		{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
 		{"Config.GetCertificate", Field, 4},
 		{"Config.GetClientCertificate", Field, 8},
 		{"Config.GetConfigForClient", Field, 8},
@@ -902,6 +989,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ConnectionState", Type, 0},
 		{"ConnectionState.CipherSuite", Field, 0},
 		{"ConnectionState.DidResume", Field, 1},
+		{"ConnectionState.ECHAccepted", Field, 23},
 		{"ConnectionState.HandshakeComplete", Field, 0},
 		{"ConnectionState.NegotiatedProtocol", Field, 0},
 		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
@@ -925,7 +1013,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"ECDSAWithP384AndSHA384", Const, 8},
 		{"ECDSAWithP521AndSHA512", Const, 8},
 		{"ECDSAWithSHA1", Const, 10},
+		{"ECHRejectionError", Type, 23},
+		{"ECHRejectionError.RetryConfigList", Field, 23},
 		{"Ed25519", Const, 13},
+		{"EncryptedClientHelloKey", Type, 24},
+		{"EncryptedClientHelloKey.Config", Field, 24},
+		{"EncryptedClientHelloKey.PrivateKey", Field, 24},
+		{"EncryptedClientHelloKey.SendAsRetry", Field, 24},
 		{"InsecureCipherSuites", Func, 14},
 		{"Listen", Func, 0},
 		{"LoadX509KeyPair", Func, 0},
@@ -943,6 +1037,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParseSessionState", Func, 21},
 		{"QUICClient", Func, 21},
 		{"QUICConfig", Type, 21},
+		{"QUICConfig.EnableSessionEvents", Field, 23},
 		{"QUICConfig.TLSConfig", Field, 21},
 		{"QUICConn", Type, 21},
 		{"QUICEncryptionLevel", Type, 21},
@@ -954,16 +1049,20 @@ var PackageSymbols = map[string][]Symbol{
 		{"QUICEvent.Data", Field, 21},
 		{"QUICEvent.Kind", Field, 21},
 		{"QUICEvent.Level", Field, 21},
+		{"QUICEvent.SessionState", Field, 23},
 		{"QUICEvent.Suite", Field, 21},
 		{"QUICEventKind", Type, 21},
 		{"QUICHandshakeDone", Const, 21},
 		{"QUICNoEvent", Const, 21},
 		{"QUICRejectedEarlyData", Const, 21},
+		{"QUICResumeSession", Const, 23},
 		{"QUICServer", Func, 21},
 		{"QUICSessionTicketOptions", Type, 21},
 		{"QUICSessionTicketOptions.EarlyData", Field, 21},
+		{"QUICSessionTicketOptions.Extra", Field, 23},
 		{"QUICSetReadSecret", Const, 21},
 		{"QUICSetWriteSecret", Const, 21},
+		{"QUICStoreSession", Const, 23},
 		{"QUICTransportParameters", Const, 21},
 		{"QUICTransportParametersRequired", Const, 21},
 		{"QUICWriteData", Const, 21},
@@ -1019,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"VersionTLS12", Const, 2},
 		{"VersionTLS13", Const, 12},
 		{"X25519", Const, 8},
+		{"X25519MLKEM768", Const, 24},
 		{"X509KeyPair", Func, 0},
 	},
 	"crypto/x509": {
@@ -1036,13 +1136,19 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Certificate).Verify", Method, 0},
 		{"(*Certificate).VerifyHostname", Method, 0},
 		{"(*CertificateRequest).CheckSignature", Method, 5},
+		{"(*OID).UnmarshalBinary", Method, 23},
+		{"(*OID).UnmarshalText", Method, 23},
 		{"(*RevocationList).CheckSignatureFrom", Method, 19},
 		{"(CertificateInvalidError).Error", Method, 0},
 		{"(ConstraintViolationError).Error", Method, 0},
 		{"(HostnameError).Error", Method, 0},
 		{"(InsecureAlgorithmError).Error", Method, 6},
+		{"(OID).AppendBinary", Method, 24},
+		{"(OID).AppendText", Method, 24},
 		{"(OID).Equal", Method, 22},
 		{"(OID).EqualASN1OID", Method, 22},
+		{"(OID).MarshalBinary", Method, 23},
+		{"(OID).MarshalText", Method, 23},
 		{"(OID).String", Method, 22},
 		{"(PublicKeyAlgorithm).String", Method, 10},
 		{"(SignatureAlgorithm).String", Method, 6},
@@ -1067,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.Extensions", Field, 2},
 		{"Certificate.ExtraExtensions", Field, 2},
 		{"Certificate.IPAddresses", Field, 1},
+		{"Certificate.InhibitAnyPolicy", Field, 24},
+		{"Certificate.InhibitAnyPolicyZero", Field, 24},
+		{"Certificate.InhibitPolicyMapping", Field, 24},
+		{"Certificate.InhibitPolicyMappingZero", Field, 24},
 		{"Certificate.IsCA", Field, 0},
 		{"Certificate.Issuer", Field, 0},
 		{"Certificate.IssuingCertificateURL", Field, 2},
@@ -1083,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.PermittedURIDomains", Field, 10},
 		{"Certificate.Policies", Field, 22},
 		{"Certificate.PolicyIdentifiers", Field, 0},
+		{"Certificate.PolicyMappings", Field, 24},
 		{"Certificate.PublicKey", Field, 0},
 		{"Certificate.PublicKeyAlgorithm", Field, 0},
 		{"Certificate.Raw", Field, 0},
@@ -1090,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.RawSubject", Field, 0},
 		{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
 		{"Certificate.RawTBSCertificate", Field, 0},
+		{"Certificate.RequireExplicitPolicy", Field, 24},
+		{"Certificate.RequireExplicitPolicyZero", Field, 24},
 		{"Certificate.SerialNumber", Field, 0},
 		{"Certificate.Signature", Field, 0},
 		{"Certificate.SignatureAlgorithm", Field, 0},
@@ -1181,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"NameConstraintsWithoutSANs", Const, 10},
 		{"NameMismatch", Const, 8},
 		{"NewCertPool", Func, 0},
+		{"NoValidChains", Const, 24},
 		{"NotAuthorizedToSign", Const, 0},
 		{"OID", Type, 22},
 		{"OIDFromInts", Func, 22},
@@ -1196,11 +1310,15 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParseCertificates", Func, 0},
 		{"ParseDERCRL", Func, 0},
 		{"ParseECPrivateKey", Func, 1},
+		{"ParseOID", Func, 23},
 		{"ParsePKCS1PrivateKey", Func, 0},
 		{"ParsePKCS1PublicKey", Func, 10},
 		{"ParsePKCS8PrivateKey", Func, 0},
 		{"ParsePKIXPublicKey", Func, 0},
 		{"ParseRevocationList", Func, 19},
+		{"PolicyMapping", Type, 24},
+		{"PolicyMapping.IssuerDomainPolicy", Field, 24},
+		{"PolicyMapping.SubjectDomainPolicy", Field, 24},
 		{"PublicKeyAlgorithm", Type, 0},
 		{"PureEd25519", Const, 13},
 		{"RSA", Const, 0},
@@ -1247,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"UnknownPublicKeyAlgorithm", Const, 0},
 		{"UnknownSignatureAlgorithm", Const, 0},
 		{"VerifyOptions", Type, 0},
+		{"VerifyOptions.CertificatePolicies", Field, 24},
 		{"VerifyOptions.CurrentTime", Field, 0},
 		{"VerifyOptions.DNSName", Field, 0},
 		{"VerifyOptions.Intermediates", Field, 0},
@@ -1957,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*File).DynString", Method, 1},
 		{"(*File).DynValue", Method, 21},
 		{"(*File).DynamicSymbols", Method, 4},
+		{"(*File).DynamicVersionNeeds", Method, 24},
+		{"(*File).DynamicVersions", Method, 24},
 		{"(*File).ImportedLibraries", Method, 0},
 		{"(*File).ImportedSymbols", Method, 0},
 		{"(*File).Section", Method, 0},
@@ -2030,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Type).String", Method, 0},
 		{"(Version).GoString", Method, 0},
 		{"(Version).String", Method, 0},
+		{"(VersionIndex).Index", Method, 24},
+		{"(VersionIndex).IsHidden", Method, 24},
 		{"ARM_MAGIC_TRAMP_NUMBER", Const, 0},
 		{"COMPRESS_HIOS", Const, 6},
 		{"COMPRESS_HIPROC", Const, 6},
@@ -2222,6 +2345,19 @@ var PackageSymbols = map[string][]Symbol{
 		{"DynFlag", Type, 0},
 		{"DynFlag1", Type, 21},
 		{"DynTag", Type, 0},
+		{"DynamicVersion", Type, 24},
+		{"DynamicVersion.Deps", Field, 24},
+		{"DynamicVersion.Flags", Field, 24},
+		{"DynamicVersion.Index", Field, 24},
+		{"DynamicVersion.Name", Field, 24},
+		{"DynamicVersionDep", Type, 24},
+		{"DynamicVersionDep.Dep", Field, 24},
+		{"DynamicVersionDep.Flags", Field, 24},
+		{"DynamicVersionDep.Index", Field, 24},
+		{"DynamicVersionFlag", Type, 24},
+		{"DynamicVersionNeed", Type, 24},
+		{"DynamicVersionNeed.Name", Field, 24},
+		{"DynamicVersionNeed.Needs", Field, 24},
 		{"EI_ABIVERSION", Const, 0},
 		{"EI_CLASS", Const, 0},
 		{"EI_DATA", Const, 0},
@@ -2541,6 +2677,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"PT_NOTE", Const, 0},
 		{"PT_NULL", Const, 0},
 		{"PT_OPENBSD_BOOTDATA", Const, 16},
+		{"PT_OPENBSD_NOBTCFI", Const, 23},
 		{"PT_OPENBSD_RANDOMIZE", Const, 16},
 		{"PT_OPENBSD_WXNEEDED", Const, 16},
 		{"PT_PAX_FLAGS", Const, 16},
@@ -3620,13 +3757,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"STT_COMMON", Const, 0},
 		{"STT_FILE", Const, 0},
 		{"STT_FUNC", Const, 0},
+		{"STT_GNU_IFUNC", Const, 23},
 		{"STT_HIOS", Const, 0},
 		{"STT_HIPROC", Const, 0},
 		{"STT_LOOS", Const, 0},
 		{"STT_LOPROC", Const, 0},
 		{"STT_NOTYPE", Const, 0},
 		{"STT_OBJECT", Const, 0},
+		{"STT_RELC", Const, 23},
 		{"STT_SECTION", Const, 0},
+		{"STT_SRELC", Const, 23},
 		{"STT_TLS", Const, 0},
 		{"STV_DEFAULT", Const, 0},
 		{"STV_HIDDEN", Const, 0},
@@ -3696,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"SymType", Type, 0},
 		{"SymVis", Type, 0},
 		{"Symbol", Type, 0},
+		{"Symbol.HasVersion", Field, 24},
 		{"Symbol.Info", Field, 0},
 		{"Symbol.Library", Field, 13},
 		{"Symbol.Name", Field, 0},
@@ -3704,8 +3845,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"Symbol.Size", Field, 0},
 		{"Symbol.Value", Field, 0},
 		{"Symbol.Version", Field, 13},
+		{"Symbol.VersionIndex", Field, 24},
 		{"Type", Type, 0},
+		{"VER_FLG_BASE", Const, 24},
+		{"VER_FLG_INFO", Const, 24},
+		{"VER_FLG_WEAK", Const, 24},
 		{"Version", Type, 0},
+		{"VersionIndex", Type, 24},
 	},
 	"debug/gosym": {
 		{"(*DecodingError).Error", Method, 0},
@@ -4431,8 +4577,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"FS", Type, 16},
 	},
 	"encoding": {
+		{"BinaryAppender", Type, 24},
 		{"BinaryMarshaler", Type, 2},
 		{"BinaryUnmarshaler", Type, 2},
+		{"TextAppender", Type, 24},
 		{"TextMarshaler", Type, 2},
 		{"TextUnmarshaler", Type, 2},
 	},
@@ -4544,11 +4692,14 @@ var PackageSymbols = map[string][]Symbol{
 		{"URLEncoding", Var, 0},
 	},
 	"encoding/binary": {
+		{"Append", Func, 23},
 		{"AppendByteOrder", Type, 19},
 		{"AppendUvarint", Func, 19},
 		{"AppendVarint", Func, 19},
 		{"BigEndian", Var, 0},
 		{"ByteOrder", Type, 0},
+		{"Decode", Func, 23},
+		{"Encode", Func, 23},
 		{"LittleEndian", Var, 0},
 		{"MaxVarintLen16", Const, 0},
 		{"MaxVarintLen32", Const, 0},
@@ -5308,6 +5459,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParenExpr.Rparen", Field, 0},
 		{"ParenExpr.X", Field, 0},
 		{"Pkg", Const, 0},
+		{"Preorder", Func, 23},
 		{"Print", Func, 0},
 		{"RECV", Const, 0},
 		{"RangeStmt", Type, 0},
@@ -5898,7 +6050,12 @@ var PackageSymbols = map[string][]Symbol{
 	},
 	"go/types": {
 		{"(*Alias).Obj", Method, 22},
+		{"(*Alias).Origin", Method, 23},
+		{"(*Alias).Rhs", Method, 23},
+		{"(*Alias).SetTypeParams", Method, 23},
 		{"(*Alias).String", Method, 22},
+		{"(*Alias).TypeArgs", Method, 23},
+		{"(*Alias).TypeParams", Method, 23},
 		{"(*Alias).Underlying", Method, 22},
 		{"(*ArgumentError).Error", Method, 18},
 		{"(*ArgumentError).Unwrap", Method, 18},
@@ -5943,6 +6100,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Func).Pkg", Method, 5},
 		{"(*Func).Pos", Method, 5},
 		{"(*Func).Scope", Method, 5},
+		{"(*Func).Signature", Method, 23},
 		{"(*Func).String", Method, 5},
 		{"(*Func).Type", Method, 5},
 		{"(*Info).ObjectOf", Method, 5},
@@ -5952,13 +6110,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Interface).Complete", Method, 5},
 		{"(*Interface).Embedded", Method, 5},
 		{"(*Interface).EmbeddedType", Method, 11},
+		{"(*Interface).EmbeddedTypes", Method, 24},
 		{"(*Interface).Empty", Method, 5},
 		{"(*Interface).ExplicitMethod", Method, 5},
+		{"(*Interface).ExplicitMethods", Method, 24},
 		{"(*Interface).IsComparable", Method, 18},
 		{"(*Interface).IsImplicit", Method, 18},
 		{"(*Interface).IsMethodSet", Method, 18},
 		{"(*Interface).MarkImplicit", Method, 18},
 		{"(*Interface).Method", Method, 5},
+		{"(*Interface).Methods", Method, 24},
 		{"(*Interface).NumEmbeddeds", Method, 5},
 		{"(*Interface).NumExplicitMethods", Method, 5},
 		{"(*Interface).NumMethods", Method, 5},
@@ -5979,9 +6140,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*MethodSet).At", Method, 5},
 		{"(*MethodSet).Len", Method, 5},
 		{"(*MethodSet).Lookup", Method, 5},
+		{"(*MethodSet).Methods", Method, 24},
 		{"(*MethodSet).String", Method, 5},
 		{"(*Named).AddMethod", Method, 5},
 		{"(*Named).Method", Method, 5},
+		{"(*Named).Methods", Method, 24},
 		{"(*Named).NumMethods", Method, 5},
 		{"(*Named).Obj", Method, 5},
 		{"(*Named).Origin", Method, 18},
@@ -6022,6 +6185,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Pointer).String", Method, 5},
 		{"(*Pointer).Underlying", Method, 5},
 		{"(*Scope).Child", Method, 5},
+		{"(*Scope).Children", Method, 24},
 		{"(*Scope).Contains", Method, 5},
 		{"(*Scope).End", Method, 5},
 		{"(*Scope).Innermost", Method, 5},
@@ -6057,6 +6221,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*StdSizes).Offsetsof", Method, 5},
 		{"(*StdSizes).Sizeof", Method, 5},
 		{"(*Struct).Field", Method, 5},
+		{"(*Struct).Fields", Method, 24},
 		{"(*Struct).NumFields", Method, 5},
 		{"(*Struct).String", Method, 5},
 		{"(*Struct).Tag", Method, 5},
@@ -6068,8 +6233,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Tuple).Len", Method, 5},
 		{"(*Tuple).String", Method, 5},
 		{"(*Tuple).Underlying", Method, 5},
+		{"(*Tuple).Variables", Method, 24},
 		{"(*TypeList).At", Method, 18},
 		{"(*TypeList).Len", Method, 18},
+		{"(*TypeList).Types", Method, 24},
 		{"(*TypeName).Exported", Method, 5},
 		{"(*TypeName).Id", Method, 5},
 		{"(*TypeName).IsAlias", Method, 9},
@@ -6087,9 +6254,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*TypeParam).Underlying", Method, 18},
 		{"(*TypeParamList).At", Method, 18},
 		{"(*TypeParamList).Len", Method, 18},
+		{"(*TypeParamList).TypeParams", Method, 24},
 		{"(*Union).Len", Method, 18},
 		{"(*Union).String", Method, 18},
 		{"(*Union).Term", Method, 18},
+		{"(*Union).Terms", Method, 24},
 		{"(*Union).Underlying", Method, 18},
 		{"(*Var).Anonymous", Method, 5},
 		{"(*Var).Embedded", Method, 11},
@@ -6360,10 +6529,12 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Hash).WriteByte", Method, 14},
 		{"(*Hash).WriteString", Method, 14},
 		{"Bytes", Func, 19},
+		{"Comparable", Func, 24},
 		{"Hash", Type, 14},
 		{"MakeSeed", Func, 14},
 		{"Seed", Type, 14},
 		{"String", Func, 19},
+		{"WriteComparable", Func, 24},
 	},
 	"html": {
 		{"EscapeString", Func, 0},
@@ -6992,6 +7163,12 @@ var PackageSymbols = map[string][]Symbol{
 		{"TempFile", Func, 0},
 		{"WriteFile", Func, 0},
 	},
+	"iter": {
+		{"Pull", Func, 23},
+		{"Pull2", Func, 23},
+		{"Seq", Type, 23},
+		{"Seq2", Type, 23},
+	},
 	"log": {
 		{"(*Logger).Fatal", Method, 0},
 		{"(*Logger).Fatalf", Method, 0},
@@ -7044,6 +7221,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*JSONHandler).WithGroup", Method, 21},
 		{"(*Level).UnmarshalJSON", Method, 21},
 		{"(*Level).UnmarshalText", Method, 21},
+		{"(*LevelVar).AppendText", Method, 24},
 		{"(*LevelVar).Level", Method, 21},
 		{"(*LevelVar).MarshalText", Method, 21},
 		{"(*LevelVar).Set", Method, 21},
@@ -7072,6 +7250,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Attr).Equal", Method, 21},
 		{"(Attr).String", Method, 21},
 		{"(Kind).String", Method, 21},
+		{"(Level).AppendText", Method, 24},
 		{"(Level).Level", Method, 21},
 		{"(Level).MarshalJSON", Method, 21},
 		{"(Level).MarshalText", Method, 21},
@@ -7102,6 +7281,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Debug", Func, 21},
 		{"DebugContext", Func, 21},
 		{"Default", Func, 21},
+		{"DiscardHandler", Var, 24},
 		{"Duration", Func, 21},
 		{"DurationValue", Func, 21},
 		{"Error", Func, 21},
@@ -7222,11 +7402,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"Writer", Type, 0},
 	},
 	"maps": {
+		{"All", Func, 23},
 		{"Clone", Func, 21},
+		{"Collect", Func, 23},
 		{"Copy", Func, 21},
 		{"DeleteFunc", Func, 21},
 		{"Equal", Func, 21},
 		{"EqualFunc", Func, 21},
+		{"Insert", Func, 23},
+		{"Keys", Func, 23},
+		{"Values", Func, 23},
 	},
 	"math": {
 		{"Abs", Func, 0},
@@ -7332,6 +7517,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Float).Acc", Method, 5},
 		{"(*Float).Add", Method, 5},
 		{"(*Float).Append", Method, 5},
+		{"(*Float).AppendText", Method, 24},
 		{"(*Float).Cmp", Method, 5},
 		{"(*Float).Copy", Method, 5},
 		{"(*Float).Float32", Method, 5},
@@ -7378,6 +7564,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Int).And", Method, 0},
 		{"(*Int).AndNot", Method, 0},
 		{"(*Int).Append", Method, 6},
+		{"(*Int).AppendText", Method, 24},
 		{"(*Int).Binomial", Method, 0},
 		{"(*Int).Bit", Method, 0},
 		{"(*Int).BitLen", Method, 0},
@@ -7434,6 +7621,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Int).Xor", Method, 0},
 		{"(*Rat).Abs", Method, 0},
 		{"(*Rat).Add", Method, 0},
+		{"(*Rat).AppendText", Method, 24},
 		{"(*Rat).Cmp", Method, 0},
 		{"(*Rat).Denom", Method, 0},
 		{"(*Rat).Float32", Method, 4},
@@ -7616,10 +7804,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"Zipf", Type, 0},
 	},
 	"math/rand/v2": {
+		{"(*ChaCha8).AppendBinary", Method, 24},
 		{"(*ChaCha8).MarshalBinary", Method, 22},
+		{"(*ChaCha8).Read", Method, 23},
 		{"(*ChaCha8).Seed", Method, 22},
 		{"(*ChaCha8).Uint64", Method, 22},
 		{"(*ChaCha8).UnmarshalBinary", Method, 22},
+		{"(*PCG).AppendBinary", Method, 24},
 		{"(*PCG).MarshalBinary", Method, 22},
 		{"(*PCG).Seed", Method, 22},
 		{"(*PCG).Uint64", Method, 22},
@@ -7636,6 +7827,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Rand).NormFloat64", Method, 22},
 		{"(*Rand).Perm", Method, 22},
 		{"(*Rand).Shuffle", Method, 22},
+		{"(*Rand).Uint", Method, 23},
 		{"(*Rand).Uint32", Method, 22},
 		{"(*Rand).Uint32N", Method, 22},
 		{"(*Rand).Uint64", Method, 22},
@@ -7663,6 +7855,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Rand", Type, 22},
 		{"Shuffle", Func, 22},
 		{"Source", Type, 22},
+		{"Uint", Func, 23},
 		{"Uint32", Func, 22},
 		{"Uint32N", Func, 22},
 		{"Uint64", Func, 22},
@@ -7743,6 +7936,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*DNSError).Error", Method, 0},
 		{"(*DNSError).Temporary", Method, 0},
 		{"(*DNSError).Timeout", Method, 0},
+		{"(*DNSError).Unwrap", Method, 23},
 		{"(*Dialer).Dial", Method, 1},
 		{"(*Dialer).DialContext", Method, 7},
 		{"(*Dialer).MultipathTCP", Method, 21},
@@ -7809,6 +8003,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*TCPConn).RemoteAddr", Method, 0},
 		{"(*TCPConn).SetDeadline", Method, 0},
 		{"(*TCPConn).SetKeepAlive", Method, 0},
+		{"(*TCPConn).SetKeepAliveConfig", Method, 23},
 		{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
 		{"(*TCPConn).SetLinger", Method, 0},
 		{"(*TCPConn).SetNoDelay", Method, 0},
@@ -7883,6 +8078,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*UnixListener).SyscallConn", Method, 10},
 		{"(Flags).String", Method, 0},
 		{"(HardwareAddr).String", Method, 0},
+		{"(IP).AppendText", Method, 24},
 		{"(IP).DefaultMask", Method, 0},
 		{"(IP).Equal", Method, 0},
 		{"(IP).IsGlobalUnicast", Method, 0},
@@ -7922,6 +8118,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"DNSError.IsTimeout", Field, 0},
 		{"DNSError.Name", Field, 0},
 		{"DNSError.Server", Field, 0},
+		{"DNSError.UnwrapErr", Field, 23},
 		{"DefaultResolver", Var, 8},
 		{"Dial", Func, 0},
 		{"DialIP", Func, 0},
@@ -7937,6 +8134,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Dialer.DualStack", Field, 2},
 		{"Dialer.FallbackDelay", Field, 5},
 		{"Dialer.KeepAlive", Field, 3},
+		{"Dialer.KeepAliveConfig", Field, 23},
 		{"Dialer.LocalAddr", Field, 1},
 		{"Dialer.Resolver", Field, 8},
 		{"Dialer.Timeout", Field, 1},
@@ -7989,10 +8187,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"Interfaces", Func, 0},
 		{"InvalidAddrError", Type, 0},
 		{"JoinHostPort", Func, 0},
+		{"KeepAliveConfig", Type, 23},
+		{"KeepAliveConfig.Count", Field, 23},
+		{"KeepAliveConfig.Enable", Field, 23},
+		{"KeepAliveConfig.Idle", Field, 23},
+		{"KeepAliveConfig.Interval", Field, 23},
 		{"Listen", Func, 0},
 		{"ListenConfig", Type, 11},
 		{"ListenConfig.Control", Field, 11},
 		{"ListenConfig.KeepAlive", Field, 13},
+		{"ListenConfig.KeepAliveConfig", Field, 23},
 		{"ListenIP", Func, 0},
 		{"ListenMulticastUDP", Func, 0},
 		{"ListenPacket", Func, 0},
@@ -8075,12 +8279,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*MaxBytesError).Error", Method, 19},
 		{"(*ProtocolError).Error", Method, 0},
 		{"(*ProtocolError).Is", Method, 21},
+		{"(*Protocols).SetHTTP1", Method, 24},
+		{"(*Protocols).SetHTTP2", Method, 24},
+		{"(*Protocols).SetUnencryptedHTTP2", Method, 24},
 		{"(*Request).AddCookie", Method, 0},
 		{"(*Request).BasicAuth", Method, 4},
 		{"(*Request).Clone", Method, 13},
 		{"(*Request).Context", Method, 7},
 		{"(*Request).Cookie", Method, 0},
 		{"(*Request).Cookies", Method, 0},
+		{"(*Request).CookiesNamed", Method, 23},
 		{"(*Request).FormFile", Method, 0},
 		{"(*Request).FormValue", Method, 0},
 		{"(*Request).MultipartReader", Method, 0},
@@ -8133,6 +8341,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Header).Values", Method, 14},
 		{"(Header).Write", Method, 0},
 		{"(Header).WriteSubset", Method, 0},
+		{"(Protocols).HTTP1", Method, 24},
+		{"(Protocols).HTTP2", Method, 24},
+		{"(Protocols).String", Method, 24},
+		{"(Protocols).UnencryptedHTTP2", Method, 24},
 		{"AllowQuerySemicolons", Func, 17},
 		{"CanonicalHeaderKey", Func, 0},
 		{"Client", Type, 0},
@@ -8148,7 +8360,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Cookie.HttpOnly", Field, 0},
 		{"Cookie.MaxAge", Field, 0},
 		{"Cookie.Name", Field, 0},
+		{"Cookie.Partitioned", Field, 23},
 		{"Cookie.Path", Field, 0},
+		{"Cookie.Quoted", Field, 23},
 		{"Cookie.Raw", Field, 0},
 		{"Cookie.RawExpires", Field, 0},
 		{"Cookie.SameSite", Field, 11},
@@ -8193,6 +8407,18 @@ var PackageSymbols = map[string][]Symbol{
 		{"FileSystem", Type, 0},
 		{"Flusher", Type, 0},
 		{"Get", Func, 0},
+		{"HTTP2Config", Type, 24},
+		{"HTTP2Config.CountError", Field, 24},
+		{"HTTP2Config.MaxConcurrentStreams", Field, 24},
+		{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
+		{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
+		{"HTTP2Config.MaxReadFrameSize", Field, 24},
+		{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
+		{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
+		{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
+		{"HTTP2Config.PingTimeout", Field, 24},
+		{"HTTP2Config.SendPingTimeout", Field, 24},
+		{"HTTP2Config.WriteByteTimeout", Field, 24},
 		{"Handle", Func, 0},
 		{"HandleFunc", Func, 0},
 		{"Handler", Type, 0},
@@ -8225,12 +8451,15 @@ var PackageSymbols = map[string][]Symbol{
 		{"NoBody", Var, 8},
 		{"NotFound", Func, 0},
 		{"NotFoundHandler", Func, 0},
+		{"ParseCookie", Func, 23},
 		{"ParseHTTPVersion", Func, 0},
+		{"ParseSetCookie", Func, 23},
 		{"ParseTime", Func, 1},
 		{"Post", Func, 0},
 		{"PostForm", Func, 0},
 		{"ProtocolError", Type, 0},
 		{"ProtocolError.ErrorString", Field, 0},
+		{"Protocols", Type, 24},
 		{"ProxyFromEnvironment", Func, 0},
 		{"ProxyURL", Func, 0},
 		{"PushOptions", Type, 8},
@@ -8252,6 +8481,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Request.Host", Field, 0},
 		{"Request.Method", Field, 0},
 		{"Request.MultipartForm", Field, 0},
+		{"Request.Pattern", Field, 23},
 		{"Request.PostForm", Field, 1},
 		{"Request.Proto", Field, 0},
 		{"Request.ProtoMajor", Field, 0},
@@ -8299,9 +8529,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"Server.ConnState", Field, 3},
 		{"Server.DisableGeneralOptionsHandler", Field, 20},
 		{"Server.ErrorLog", Field, 3},
+		{"Server.HTTP2", Field, 24},
 		{"Server.Handler", Field, 0},
 		{"Server.IdleTimeout", Field, 8},
 		{"Server.MaxHeaderBytes", Field, 0},
+		{"Server.Protocols", Field, 24},
 		{"Server.ReadHeaderTimeout", Field, 8},
 		{"Server.ReadTimeout", Field, 0},
 		{"Server.TLSConfig", Field, 0},
@@ -8391,12 +8623,14 @@ var PackageSymbols = map[string][]Symbol{
 		{"Transport.ExpectContinueTimeout", Field, 6},
 		{"Transport.ForceAttemptHTTP2", Field, 13},
 		{"Transport.GetProxyConnectHeader", Field, 16},
+		{"Transport.HTTP2", Field, 24},
 		{"Transport.IdleConnTimeout", Field, 7},
 		{"Transport.MaxConnsPerHost", Field, 11},
 		{"Transport.MaxIdleConns", Field, 7},
 		{"Transport.MaxIdleConnsPerHost", Field, 0},
 		{"Transport.MaxResponseHeaderBytes", Field, 7},
 		{"Transport.OnProxyConnectResponse", Field, 20},
+		{"Transport.Protocols", Field, 24},
 		{"Transport.Proxy", Field, 0},
 		{"Transport.ProxyConnectHeader", Field, 8},
 		{"Transport.ReadBufferSize", Field, 13},
@@ -8453,6 +8687,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"DefaultRemoteAddr", Const, 0},
 		{"NewRecorder", Func, 0},
 		{"NewRequest", Func, 7},
+		{"NewRequestWithContext", Func, 23},
 		{"NewServer", Func, 0},
 		{"NewTLSServer", Func, 0},
 		{"NewUnstartedServer", Func, 0},
@@ -8583,6 +8818,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*AddrPort).UnmarshalText", Method, 18},
 		{"(*Prefix).UnmarshalBinary", Method, 18},
 		{"(*Prefix).UnmarshalText", Method, 18},
+		{"(Addr).AppendBinary", Method, 24},
+		{"(Addr).AppendText", Method, 24},
 		{"(Addr).AppendTo", Method, 18},
 		{"(Addr).As16", Method, 18},
 		{"(Addr).As4", Method, 18},
@@ -8613,6 +8850,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Addr).WithZone", Method, 18},
 		{"(Addr).Zone", Method, 18},
 		{"(AddrPort).Addr", Method, 18},
+		{"(AddrPort).AppendBinary", Method, 24},
+		{"(AddrPort).AppendText", Method, 24},
 		{"(AddrPort).AppendTo", Method, 18},
 		{"(AddrPort).Compare", Method, 22},
 		{"(AddrPort).IsValid", Method, 18},
@@ -8621,6 +8860,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(AddrPort).Port", Method, 18},
 		{"(AddrPort).String", Method, 18},
 		{"(Prefix).Addr", Method, 18},
+		{"(Prefix).AppendBinary", Method, 24},
+		{"(Prefix).AppendText", Method, 24},
 		{"(Prefix).AppendTo", Method, 18},
 		{"(Prefix).Bits", Method, 18},
 		{"(Prefix).Contains", Method, 18},
@@ -8805,6 +9046,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Error).Temporary", Method, 6},
 		{"(*Error).Timeout", Method, 6},
 		{"(*Error).Unwrap", Method, 13},
+		{"(*URL).AppendBinary", Method, 24},
 		{"(*URL).EscapedFragment", Method, 15},
 		{"(*URL).EscapedPath", Method, 5},
 		{"(*URL).Hostname", Method, 8},
@@ -8904,6 +9146,17 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*ProcessState).SysUsage", Method, 0},
 		{"(*ProcessState).SystemTime", Method, 0},
 		{"(*ProcessState).UserTime", Method, 0},
+		{"(*Root).Close", Method, 24},
+		{"(*Root).Create", Method, 24},
+		{"(*Root).FS", Method, 24},
+		{"(*Root).Lstat", Method, 24},
+		{"(*Root).Mkdir", Method, 24},
+		{"(*Root).Name", Method, 24},
+		{"(*Root).Open", Method, 24},
+		{"(*Root).OpenFile", Method, 24},
+		{"(*Root).OpenRoot", Method, 24},
+		{"(*Root).Remove", Method, 24},
+		{"(*Root).Stat", Method, 24},
 		{"(*SyscallError).Error", Method, 0},
 		{"(*SyscallError).Timeout", Method, 10},
 		{"(*SyscallError).Unwrap", Method, 13},
@@ -8917,6 +9170,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Chown", Func, 0},
 		{"Chtimes", Func, 0},
 		{"Clearenv", Func, 0},
+		{"CopyFS", Func, 23},
 		{"Create", Func, 0},
 		{"CreateTemp", Func, 16},
 		{"DevNull", Const, 0},
@@ -8996,6 +9250,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"O_WRONLY", Const, 0},
 		{"Open", Func, 0},
 		{"OpenFile", Func, 0},
+		{"OpenInRoot", Func, 24},
+		{"OpenRoot", Func, 24},
 		{"PathError", Type, 0},
 		{"PathError.Err", Field, 0},
 		{"PathError.Op", Field, 0},
@@ -9017,6 +9273,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Remove", Func, 0},
 		{"RemoveAll", Func, 0},
 		{"Rename", Func, 0},
+		{"Root", Type, 24},
 		{"SEEK_CUR", Const, 0},
 		{"SEEK_END", Const, 0},
 		{"SEEK_SET", Const, 0},
@@ -9150,6 +9407,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"IsLocal", Func, 20},
 		{"Join", Func, 0},
 		{"ListSeparator", Const, 0},
+		{"Localize", Func, 23},
 		{"Match", Func, 0},
 		{"Rel", Func, 0},
 		{"Separator", Const, 0},
@@ -9232,6 +9490,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Value).Pointer", Method, 0},
 		{"(Value).Recv", Method, 0},
 		{"(Value).Send", Method, 0},
+		{"(Value).Seq", Method, 23},
+		{"(Value).Seq2", Method, 23},
 		{"(Value).Set", Method, 0},
 		{"(Value).SetBool", Method, 0},
 		{"(Value).SetBytes", Method, 0},
@@ -9314,6 +9574,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"SelectSend", Const, 1},
 		{"SendDir", Const, 0},
 		{"Slice", Const, 0},
+		{"SliceAt", Func, 23},
 		{"SliceHeader", Type, 0},
 		{"SliceHeader.Cap", Field, 0},
 		{"SliceHeader.Data", Field, 0},
@@ -9354,6 +9615,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Zero", Func, 0},
 	},
 	"regexp": {
+		{"(*Regexp).AppendText", Method, 24},
 		{"(*Regexp).Copy", Method, 6},
 		{"(*Regexp).Expand", Method, 0},
 		{"(*Regexp).ExpandString", Method, 0},
@@ -9534,6 +9796,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*StackRecord).Stack", Method, 0},
 		{"(*TypeAssertionError).Error", Method, 0},
 		{"(*TypeAssertionError).RuntimeError", Method, 0},
+		{"(Cleanup).Stop", Method, 24},
+		{"AddCleanup", Func, 24},
 		{"BlockProfile", Func, 1},
 		{"BlockProfileRecord", Type, 1},
 		{"BlockProfileRecord.Count", Field, 1},
@@ -9544,6 +9808,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Caller", Func, 0},
 		{"Callers", Func, 0},
 		{"CallersFrames", Func, 7},
+		{"Cleanup", Type, 24},
 		{"Compiler", Const, 0},
 		{"Error", Type, 0},
 		{"Frame", Type, 7},
@@ -9655,6 +9920,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"BuildSetting", Type, 18},
 		{"BuildSetting.Key", Field, 18},
 		{"BuildSetting.Value", Field, 18},
+		{"CrashOptions", Type, 23},
 		{"FreeOSMemory", Func, 1},
 		{"GCStats", Type, 1},
 		{"GCStats.LastGC", Field, 1},
@@ -9672,6 +9938,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"PrintStack", Func, 0},
 		{"ReadBuildInfo", Func, 12},
 		{"ReadGCStats", Func, 1},
+		{"SetCrashOutput", Func, 23},
 		{"SetGCPercent", Func, 1},
 		{"SetMaxStack", Func, 2},
 		{"SetMaxThreads", Func, 2},
@@ -9742,10 +10009,15 @@ var PackageSymbols = map[string][]Symbol{
 		{"WithRegion", Func, 11},
 	},
 	"slices": {
+		{"All", Func, 23},
+		{"AppendSeq", Func, 23},
+		{"Backward", Func, 23},
 		{"BinarySearch", Func, 21},
 		{"BinarySearchFunc", Func, 21},
+		{"Chunk", Func, 23},
 		{"Clip", Func, 21},
 		{"Clone", Func, 21},
+		{"Collect", Func, 23},
 		{"Compact", Func, 21},
 		{"CompactFunc", Func, 21},
 		{"Compare", Func, 21},
@@ -9767,11 +10039,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"MaxFunc", Func, 21},
 		{"Min", Func, 21},
 		{"MinFunc", Func, 21},
+		{"Repeat", Func, 23},
 		{"Replace", Func, 21},
 		{"Reverse", Func, 21},
 		{"Sort", Func, 21},
 		{"SortFunc", Func, 21},
 		{"SortStableFunc", Func, 21},
+		{"Sorted", Func, 23},
+		{"SortedFunc", Func, 23},
+		{"SortedStableFunc", Func, 23},
+		{"Values", Func, 23},
 	},
 	"sort": {
 		{"(Float64Slice).Len", Method, 0},
@@ -9894,6 +10171,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"EqualFold", Func, 0},
 		{"Fields", Func, 0},
 		{"FieldsFunc", Func, 0},
+		{"FieldsFuncSeq", Func, 24},
+		{"FieldsSeq", Func, 24},
 		{"HasPrefix", Func, 0},
 		{"HasSuffix", Func, 0},
 		{"Index", Func, 0},
@@ -9906,6 +10185,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"LastIndexAny", Func, 0},
 		{"LastIndexByte", Func, 5},
 		{"LastIndexFunc", Func, 0},
+		{"Lines", Func, 24},
 		{"Map", Func, 0},
 		{"NewReader", Func, 0},
 		{"NewReplacer", Func, 0},
@@ -9917,7 +10197,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Split", Func, 0},
 		{"SplitAfter", Func, 0},
 		{"SplitAfterN", Func, 0},
+		{"SplitAfterSeq", Func, 24},
 		{"SplitN", Func, 0},
+		{"SplitSeq", Func, 24},
 		{"Title", Func, 0},
 		{"ToLower", Func, 0},
 		{"ToLowerSpecial", Func, 0},
@@ -9936,10 +10218,14 @@ var PackageSymbols = map[string][]Symbol{
 		{"TrimSpace", Func, 0},
 		{"TrimSuffix", Func, 1},
 	},
+	"structs": {
+		{"HostLayout", Type, 23},
+	},
 	"sync": {
 		{"(*Cond).Broadcast", Method, 0},
 		{"(*Cond).Signal", Method, 0},
 		{"(*Cond).Wait", Method, 0},
+		{"(*Map).Clear", Method, 23},
 		{"(*Map).CompareAndDelete", Method, 20},
 		{"(*Map).CompareAndSwap", Method, 20},
 		{"(*Map).Delete", Method, 9},
@@ -9986,13 +10272,17 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Bool).Store", Method, 19},
 		{"(*Bool).Swap", Method, 19},
 		{"(*Int32).Add", Method, 19},
+		{"(*Int32).And", Method, 23},
 		{"(*Int32).CompareAndSwap", Method, 19},
 		{"(*Int32).Load", Method, 19},
+		{"(*Int32).Or", Method, 23},
 		{"(*Int32).Store", Method, 19},
 		{"(*Int32).Swap", Method, 19},
 		{"(*Int64).Add", Method, 19},
+		{"(*Int64).And", Method, 23},
 		{"(*Int64).CompareAndSwap", Method, 19},
 		{"(*Int64).Load", Method, 19},
+		{"(*Int64).Or", Method, 23},
 		{"(*Int64).Store", Method, 19},
 		{"(*Int64).Swap", Method, 19},
 		{"(*Pointer).CompareAndSwap", Method, 19},
@@ -10000,18 +10290,24 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Pointer).Store", Method, 19},
 		{"(*Pointer).Swap", Method, 19},
 		{"(*Uint32).Add", Method, 19},
+		{"(*Uint32).And", Method, 23},
 		{"(*Uint32).CompareAndSwap", Method, 19},
 		{"(*Uint32).Load", Method, 19},
+		{"(*Uint32).Or", Method, 23},
 		{"(*Uint32).Store", Method, 19},
 		{"(*Uint32).Swap", Method, 19},
 		{"(*Uint64).Add", Method, 19},
+		{"(*Uint64).And", Method, 23},
 		{"(*Uint64).CompareAndSwap", Method, 19},
 		{"(*Uint64).Load", Method, 19},
+		{"(*Uint64).Or", Method, 23},
 		{"(*Uint64).Store", Method, 19},
 		{"(*Uint64).Swap", Method, 19},
 		{"(*Uintptr).Add", Method, 19},
+		{"(*Uintptr).And", Method, 23},
 		{"(*Uintptr).CompareAndSwap", Method, 19},
 		{"(*Uintptr).Load", Method, 19},
+		{"(*Uintptr).Or", Method, 23},
 		{"(*Uintptr).Store", Method, 19},
 		{"(*Uintptr).Swap", Method, 19},
 		{"(*Value).CompareAndSwap", Method, 17},
@@ -10023,6 +10319,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"AddUint32", Func, 0},
 		{"AddUint64", Func, 0},
 		{"AddUintptr", Func, 0},
+		{"AndInt32", Func, 23},
+		{"AndInt64", Func, 23},
+		{"AndUint32", Func, 23},
+		{"AndUint64", Func, 23},
+		{"AndUintptr", Func, 23},
 		{"Bool", Type, 19},
 		{"CompareAndSwapInt32", Func, 0},
 		{"CompareAndSwapInt64", Func, 0},
@@ -10038,6 +10339,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"LoadUint32", Func, 0},
 		{"LoadUint64", Func, 0},
 		{"LoadUintptr", Func, 0},
+		{"OrInt32", Func, 23},
+		{"OrInt64", Func, 23},
+		{"OrUint32", Func, 23},
+		{"OrUint64", Func, 23},
+		{"OrUintptr", Func, 23},
 		{"Pointer", Type, 19},
 		{"StoreInt32", Func, 0},
 		{"StoreInt64", Func, 0},
@@ -16200,6 +16506,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"WSAEACCES", Const, 2},
 		{"WSAECONNABORTED", Const, 9},
 		{"WSAECONNRESET", Const, 3},
+		{"WSAENOPROTOOPT", Const, 23},
 		{"WSAEnumProtocols", Func, 2},
 		{"WSAID_CONNECTEX", Var, 1},
 		{"WSAIoctl", Func, 0},
@@ -16308,7 +16615,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"ValueOf", Func, 0},
 	},
 	"testing": {
+		{"(*B).Chdir", Method, 24},
 		{"(*B).Cleanup", Method, 14},
+		{"(*B).Context", Method, 24},
 		{"(*B).Elapsed", Method, 20},
 		{"(*B).Error", Method, 0},
 		{"(*B).Errorf", Method, 0},
@@ -16320,6 +16629,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*B).Helper", Method, 9},
 		{"(*B).Log", Method, 0},
 		{"(*B).Logf", Method, 0},
+		{"(*B).Loop", Method, 24},
 		{"(*B).Name", Method, 8},
 		{"(*B).ReportAllocs", Method, 1},
 		{"(*B).ReportMetric", Method, 13},
@@ -16337,7 +16647,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*B).StopTimer", Method, 0},
 		{"(*B).TempDir", Method, 15},
 		{"(*F).Add", Method, 18},
+		{"(*F).Chdir", Method, 24},
 		{"(*F).Cleanup", Method, 18},
+		{"(*F).Context", Method, 24},
 		{"(*F).Error", Method, 18},
 		{"(*F).Errorf", Method, 18},
 		{"(*F).Fail", Method, 18},
@@ -16358,7 +16670,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*F).TempDir", Method, 18},
 		{"(*M).Run", Method, 4},
 		{"(*PB).Next", Method, 3},
+		{"(*T).Chdir", Method, 24},
 		{"(*T).Cleanup", Method, 14},
+		{"(*T).Context", Method, 24},
 		{"(*T).Deadline", Method, 15},
 		{"(*T).Error", Method, 0},
 		{"(*T).Errorf", Method, 0},
@@ -16849,7 +17163,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Time).Add", Method, 0},
 		{"(Time).AddDate", Method, 0},
 		{"(Time).After", Method, 0},
+		{"(Time).AppendBinary", Method, 24},
 		{"(Time).AppendFormat", Method, 5},
+		{"(Time).AppendText", Method, 24},
 		{"(Time).Before", Method, 0},
 		{"(Time).Clock", Method, 0},
 		{"(Time).Compare", Method, 20},
@@ -17284,6 +17600,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Encode", Func, 0},
 		{"EncodeRune", Func, 0},
 		{"IsSurrogate", Func, 0},
+		{"RuneLen", Func, 23},
 	},
 	"unicode/utf8": {
 		{"AppendRune", Func, 18},
@@ -17306,6 +17623,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"ValidRune", Func, 1},
 		{"ValidString", Func, 0},
 	},
+	"unique": {
+		{"(Handle).Value", Method, 23},
+		{"Handle", Type, 23},
+		{"Make", Func, 23},
+	},
 	"unsafe": {
 		{"Add", Func, 0},
 		{"Alignof", Func, 0},
@@ -17317,4 +17639,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"String", Func, 0},
 		{"StringData", Func, 0},
 	},
+	"weak": {
+		{"(Pointer).Value", Method, 24},
+		{"Make", Func, 24},
+		{"Pointer", Type, 24},
+	},
 }
diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
index 98904017..3d96d3bf 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
@@ -6,7 +6,7 @@
 
 // Package stdlib provides a table of all exported symbols in the
 // standard library, along with the version at which they first
-// appeared.
+// appeared. It also provides the import graph of std packages.
 package stdlib
 
 import (
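Aside (illustrative, not part of the vendored patch): PackageSymbols in manifest.go maps each std package to its exported symbols together with the Go minor version that introduced them, which is how tools decide whether a symbol is available under a given language version. A minimal lookup sketch, assuming Symbol's fields (Name, Kind, Version) and the int-based Version type match upstream x/tools, and again assuming the internal package were importable:

package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

// minVersion reports the Go minor version at which a symbol first appeared
// in the given std package, or false if the manifest does not list it.
func minVersion(pkg, name string) (stdlib.Version, bool) {
	for _, sym := range stdlib.PackageSymbols[pkg] {
		if sym.Name == name {
			return sym.Version, true
		}
	}
	return 0, false
}

func main() {
	if v, ok := minVersion("crypto/mlkem", "GenerateKey768"); ok {
		fmt.Printf("crypto/mlkem.GenerateKey768 available since go1.%d\n", v)
	}
}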
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
deleted file mode 100644
index ff9437a3..00000000
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package tokeninternal provides access to some internal features of the token
-// package.
-package tokeninternal
-
-import (
-	"fmt"
-	"go/token"
-	"sort"
-	"sync"
-	"unsafe"
-)
-
-// GetLines returns the table of line-start offsets from a token.File.
-func GetLines(file *token.File) []int {
-	// token.File has a Lines method on Go 1.21 and later.
-	if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
-		return file.Lines()
-	}
-
-	// This declaration must match that of token.File.
-	// This creates a risk of dependency skew.
-	// For now we check that the size of the two
-	// declarations is the same, on the (fragile) assumption
-	// that future changes would add fields.
-	type tokenFile119 struct {
-		_     string
-		_     int
-		_     int
-		mu    sync.Mutex // we're not complete monsters
-		lines []int
-		_     []struct{}
-	}
-
-	if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
-		panic("unexpected token.File size")
-	}
-	var ptr *tokenFile119
-	type uP = unsafe.Pointer
-	*(*uP)(uP(&ptr)) = uP(file)
-	ptr.mu.Lock()
-	defer ptr.mu.Unlock()
-	return ptr.lines
-}
-
-// AddExistingFiles adds the specified files to the FileSet if they
-// are not already present. It panics if any pair of files in the
-// resulting FileSet would overlap.
-func AddExistingFiles(fset *token.FileSet, files []*token.File) {
-	// Punch through the FileSet encapsulation.
-	type tokenFileSet struct {
-		// This type remained essentially consistent from go1.16 to go1.21.
-		mutex sync.RWMutex
-		base  int
-		files []*token.File
-		_     *token.File // changed to atomic.Pointer[token.File] in go1.19
-	}
-
-	// If the size of token.FileSet changes, this will fail to compile.
-	const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
-	var _ [-delta * delta]int
-
-	type uP = unsafe.Pointer
-	var ptr *tokenFileSet
-	*(*uP)(uP(&ptr)) = uP(fset)
-	ptr.mutex.Lock()
-	defer ptr.mutex.Unlock()
-
-	// Merge and sort.
-	newFiles := append(ptr.files, files...)
-	sort.Slice(newFiles, func(i, j int) bool {
-		return newFiles[i].Base() < newFiles[j].Base()
-	})
-
-	// Reject overlapping files.
-	// Discard adjacent identical files.
-	out := newFiles[:0]
-	for i, file := range newFiles {
-		if i > 0 {
-			prev := newFiles[i-1]
-			if file == prev {
-				continue
-			}
-			if prev.Base()+prev.Size()+1 > file.Base() {
-				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
-					prev.Name(), prev.Base(), prev.Base()+prev.Size(),
-					file.Name(), file.Base(), file.Base()+file.Size()))
-			}
-		}
-		out = append(out, file)
-	}
-	newFiles = out
-
-	ptr.files = newFiles
-
-	// Advance FileSet.Base().
-	if len(newFiles) > 0 {
-		last := newFiles[len(newFiles)-1]
-		newBase := last.Base() + last.Size() + 1
-		if ptr.base < newBase {
-			ptr.base = newBase
-		}
-	}
-}
-
-// FileSetFor returns a new FileSet containing a sequence of new Files with
-// the same base, size, and line as the input files, for use in APIs that
-// require a FileSet.
-//
-// Precondition: the input files must be non-overlapping, and sorted in order
-// of their Base.
-func FileSetFor(files ...*token.File) *token.FileSet {
-	fset := token.NewFileSet()
-	for _, f := range files {
-		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
-		lines := GetLines(f)
-		f2.SetLines(lines)
-	}
-	return fset
-}
-
-// CloneFileSet creates a new FileSet holding all files in fset. It does not
-// create copies of the token.Files in fset: they are added to the resulting
-// FileSet unmodified.
-func CloneFileSet(fset *token.FileSet) *token.FileSet {
-	var files []*token.File
-	fset.Iterate(func(f *token.File) bool {
-		files = append(files, f)
-		return true
-	})
-	newFileSet := token.NewFileSet()
-	AddExistingFiles(newFileSet, files)
-	return newFileSet
-}
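
The file removed above punched through token.File and token.FileSet with unsafe because, before Go 1.21, there was no public way to read a file's line table. With (*token.File).Lines and SetLines available, roughly the same FileSetFor behaviour can be had from the standard library alone; a minimal sketch, not part of this change:

package main

import (
	"fmt"
	"go/token"
)

// fileSetFor returns a new FileSet containing copies of the given files with
// the same name, base, size, and line table. The inputs must not overlap.
func fileSetFor(files ...*token.File) *token.FileSet {
	fset := token.NewFileSet()
	for _, f := range files {
		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
		f2.SetLines(f.Lines()) // (*token.File).Lines is public since Go 1.21
	}
	return fset
}

func main() {
	src := "line one\nline two\nline three\n"
	orig := token.NewFileSet()
	f := orig.AddFile("example.go", -1, len(src))
	f.SetLinesForContent([]byte(src))

	clone := fileSetFor(f)
	fmt.Println(clone.Position(token.Pos(f.Base() + 10))) // example.go:2:2
}
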
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 00000000..cdae2b8e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,68 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that
+// interact with generic Go code, as introduced with Go 1.18. It
+// supplements the standard library APIs. Notably, the StructuralTerms
+// API computes a minimal representation of the structural
+// restrictions on a type parameter.
+//
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
+package typeparams
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+	switch e := n.(type) {
+	case *ast.IndexExpr:
+		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+	case *ast.IndexListExpr:
+		return e.X, e.Lbrack, e.Indices, e.Rbrack
+	}
+	return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+	switch len(indices) {
+	case 0:
+		panic("empty indices")
+	case 1:
+		return &ast.IndexExpr{
+			X:      x,
+			Lbrack: lbrack,
+			Index:  indices[0],
+			Rbrack: rbrack,
+		}
+	default:
+		return &ast.IndexListExpr{
+			X:       x,
+			Lbrack:  lbrack,
+			Indices: indices,
+			Rbrack:  rbrack,
+		}
+	}
+}
+
+// IsTypeParam reports whether t is a type parameter (or an alias of one).
+func IsTypeParam(t types.Type) bool {
+	_, ok := types.Unalias(t).(*types.TypeParam)
+	return ok
+}
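
UnpackIndexExpr and PackIndexExpr (added above) smooth over the ast.IndexExpr / ast.IndexListExpr split introduced with generics. The stand-alone sketch below uses only go/parser to show the two node shapes being unified; the unpack helper is a local simplification, not the vendored function:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

// unpack mirrors the switch in UnpackIndexExpr for the two index node kinds.
func unpack(e ast.Expr) (x ast.Expr, indices []ast.Expr) {
	switch e := e.(type) {
	case *ast.IndexExpr: // a[i] — exactly one index
		return e.X, []ast.Expr{e.Index}
	case *ast.IndexListExpr: // m[K, V] — two or more indices (go1.18+)
		return e.X, e.Indices
	}
	return nil, nil
}

func main() {
	for _, src := range []string{"a[i]", "m[K, V]"} {
		e, err := parser.ParseExpr(src)
		if err != nil {
			panic(err)
		}
		_, idx := unpack(e)
		fmt.Printf("%-8s -> %d index expression(s)\n", src, len(idx))
	}
}
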
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 00000000..27a2b179
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,155 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"fmt"
+	"go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+	U := T.Underlying()
+	if _, ok := U.(*types.Interface); !ok {
+		return U // for non-interface types, the core type is the underlying type
+	}
+
+	terms, err := NormalTerms(U)
+	if len(terms) == 0 || err != nil {
+		// len(terms) -> empty type set of interface.
+		// err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+		return nil // no core type.
+	}
+
+	U = terms[0].Type().Underlying()
+	var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+	for identical = 1; identical < len(terms); identical++ {
+		if !types.Identical(U, terms[identical].Type().Underlying()) {
+			break
+		}
+	}
+
+	if identical == len(terms) {
+		// https://go.dev/ref/spec#Core_types
+		// "There is a single type U which is the underlying type of all types in the type set of T"
+		return U
+	}
+	ch, ok := U.(*types.Chan)
+	if !ok {
+		return nil // no core type as identical < len(terms) and U is not a channel.
+	}
+	// https://go.dev/ref/spec#Core_types
+	// "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+	// <-chan E depending on the direction of the directional channels present."
+	for chans := identical; chans < len(terms); chans++ {
+		curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+		if !ok {
+			return nil
+		}
+		if !types.Identical(ch.Elem(), curr.Elem()) {
+			return nil // channel elements are not identical.
+		}
+		if ch.Dir() == types.SendRecv {
+			// ch is bidirectional. We can safely always use curr's direction.
+			ch = curr
+		} else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+			// ch and curr are not bidirectional and not the same direction.
+			return nil
+		}
+	}
+	return ch
+}
+
+// NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+//	type A interface{ ~string|~[]byte }
+//
+//	type B interface{ int|string }
+//
+//	type C interface { ~string|~int }
+//
+//	type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, NormalTerms returns ErrEmptyTypeSet.
+//
+// NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func NormalTerms(T types.Type) ([]*types.Term, error) {
+	// typeSetOf(T) == typeSetOf(Unalias(T))
+	typ := types.Unalias(T)
+	if named, ok := typ.(*types.Named); ok {
+		typ = named.Underlying()
+	}
+	switch typ := typ.(type) {
+	case *types.TypeParam:
+		return StructuralTerms(typ)
+	case *types.Union:
+		return UnionTermSet(typ)
+	case *types.Interface:
+		return InterfaceTermSet(typ)
+	default:
+		return []*types.Term{types.NewTerm(false, T)}, nil
+	}
+}
+
+// Deref returns the type of the variable pointed to by t,
+// if t's core type is a pointer; otherwise it returns t.
+//
+// Do not assume that Deref(T)==T implies T is not a pointer:
+// consider "type T *T", for example.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func Deref(t types.Type) types.Type {
+	if ptr, ok := CoreType(t).(*types.Pointer); ok {
+		return ptr.Elem()
+	}
+	return t
+}
+
+// MustDeref returns the type of the variable pointed to by t.
+// It panics if t's core type is not a pointer.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func MustDeref(t types.Type) types.Type {
+	if ptr, ok := CoreType(t).(*types.Pointer); ok {
+		return ptr.Elem()
+	}
+	panic(fmt.Sprintf("%v is not a pointer", t))
+}
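
CoreType and NormalTerms implement the spec's core-type computation for constraints. As a plain-Go illustration of what a core type buys you (no x/tools APIs involved): indexing or ranging over a value of type parameter type is permitted only because every type in the constraint's type set shares one underlying type.

package main

import "fmt"

// first compiles because the core type of S is []E (every type in the
// type set of ~[]E has underlying type []E), so s[0] is well defined.
func first[S ~[]E, E any](s S) E {
	return s[0]
}

// keys compiles because the core type of M is map[K]V.
func keys[M ~map[K]V, K comparable, V any](m M) []K {
	out := make([]K, 0, len(m))
	for k := range m {
		out = append(out, k)
	}
	return out
}

type IDs []int

func main() {
	fmt.Println(first(IDs{7, 8, 9}))               // 7
	fmt.Println(len(keys(map[string]int{"a": 1}))) // 1
}
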
diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
new file mode 100644
index 00000000..709d2fc1
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
@@ -0,0 +1,131 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"go/types"
+
+	"golang.org/x/tools/internal/aliases"
+)
+
+// Free is a memoization of the set of free type parameters within a
+// type. It makes a sequence of calls to [Free.Has] for overlapping
+// types more efficient. The zero value is ready for use.
+//
+// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
+type Free struct {
+	seen map[types.Type]bool
+}
+
+// Has reports whether the specified type has a free type parameter.
+func (w *Free) Has(typ types.Type) (res bool) {
+	// detect cycles
+	if x, ok := w.seen[typ]; ok {
+		return x
+	}
+	if w.seen == nil {
+		w.seen = make(map[types.Type]bool)
+	}
+	w.seen[typ] = false
+	defer func() {
+		w.seen[typ] = res
+	}()
+
+	switch t := typ.(type) {
+	case nil, *types.Basic: // TODO(gri) should nil be handled here?
+		break
+
+	case *types.Alias:
+		if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
+			return true // This is an uninstantiated Alias.
+		}
+		// The expansion of an alias can have free type parameters,
+		// whether or not the alias itself has type parameters:
+		//
+		//   func _[K comparable]() {
+		//     type Set      = map[K]bool // free(Set)      = {K}
+		//     type MapTo[V] = map[K]V    // free(Map[foo]) = {V}
+		//   }
+		//
+		// So, we must Unalias.
+		return w.Has(types.Unalias(t))
+
+	case *types.Array:
+		return w.Has(t.Elem())
+
+	case *types.Slice:
+		return w.Has(t.Elem())
+
+	case *types.Struct:
+		for i, n := 0, t.NumFields(); i < n; i++ {
+			if w.Has(t.Field(i).Type()) {
+				return true
+			}
+		}
+
+	case *types.Pointer:
+		return w.Has(t.Elem())
+
+	case *types.Tuple:
+		n := t.Len()
+		for i := range n {
+			if w.Has(t.At(i).Type()) {
+				return true
+			}
+		}
+
+	case *types.Signature:
+		// t.tparams may not be nil if we are looking at a signature
+		// of a generic function type (or an interface method) that is
+		// part of the type we're testing. We don't care about these type
+		// parameters.
+		// Similarly, the receiver of a method may declare (rather than
+		// use) type parameters, we don't care about those either.
+		// Thus, we only need to look at the input and result parameters.
+		return w.Has(t.Params()) || w.Has(t.Results())
+
+	case *types.Interface:
+		for i, n := 0, t.NumMethods(); i < n; i++ {
+			if w.Has(t.Method(i).Type()) {
+				return true
+			}
+		}
+		terms, err := InterfaceTermSet(t)
+		if err != nil {
+			return false // ill typed
+		}
+		for _, term := range terms {
+			if w.Has(term.Type()) {
+				return true
+			}
+		}
+
+	case *types.Map:
+		return w.Has(t.Key()) || w.Has(t.Elem())
+
+	case *types.Chan:
+		return w.Has(t.Elem())
+
+	case *types.Named:
+		args := t.TypeArgs()
+		if params := t.TypeParams(); params.Len() > args.Len() {
+			return true // this is an uninstantiated named type.
+		}
+		for i, n := 0, args.Len(); i < n; i++ {
+			if w.Has(args.At(i)) {
+				return true
+			}
+		}
+		return w.Has(t.Underlying()) // recurse for types local to parameterized functions
+
+	case *types.TypeParam:
+		return true
+
+	default:
+		panic(t) // unreachable
+	}
+
+	return false
+}
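
Free.Has answers the question "does this type mention an unbound type parameter?". Below is a heavily reduced sketch of the same idea, covering only a few type constructors and built with public go/types constructors; the vendored version additionally handles named, alias, signature, and interface types.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

// hasFreeTypeParam is a much-reduced stand-in for Free.Has.
func hasFreeTypeParam(t types.Type) bool {
	switch t := t.(type) {
	case *types.TypeParam:
		return true
	case *types.Slice:
		return hasFreeTypeParam(t.Elem())
	case *types.Map:
		return hasFreeTypeParam(t.Key()) || hasFreeTypeParam(t.Elem())
	case *types.Pointer:
		return hasFreeTypeParam(t.Elem())
	}
	return false
}

func main() {
	anyType := types.Universe.Lookup("any").Type()
	tp := types.NewTypeParam(types.NewTypeName(token.NoPos, nil, "T", nil), anyType)

	fmt.Println(hasFreeTypeParam(types.NewSlice(tp)))                        // true: []T
	fmt.Println(hasFreeTypeParam(types.NewMap(types.Typ[types.String], tp))) // true: map[string]T
	fmt.Println(hasFreeTypeParam(types.NewPointer(types.Typ[types.Int])))    // false: *int
}
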
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 00000000..f49802b8
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"errors"
+	"fmt"
+	"go/types"
+	"os"
+	"strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+//	type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+//	type A interface{ ~string|~[]byte }
+//
+//	type B interface{ int|string }
+//
+//	type C interface { ~string|~int }
+//
+//	type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
+	constraint := tparam.Constraint()
+	if constraint == nil {
+		return nil, fmt.Errorf("%s has nil constraint", tparam)
+	}
+	iface, _ := constraint.Underlying().(*types.Interface)
+	if iface == nil {
+		return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+	}
+	return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
+	return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
+	return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
+	tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+	if err != nil {
+		return nil, err
+	}
+	if tset.terms.isEmpty() {
+		return nil, ErrEmptyTypeSet
+	}
+	if tset.terms.isAll() {
+		return nil, nil
+	}
+	var terms []*types.Term
+	for _, term := range tset.terms {
+		terms = append(terms, types.NewTerm(term.tilde, term.typ))
+	}
+	return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+	complete bool
+	terms    termlist
+}
+
+func indentf(depth int, format string, args ...any) {
+	fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+	if t == nil {
+		panic("nil type")
+	}
+
+	if debug {
+		indentf(depth, "%s", t.String())
+		defer func() {
+			if err != nil {
+				indentf(depth, "=> %s", err)
+			} else {
+				indentf(depth, "=> %s", res.terms.String())
+			}
+		}()
+	}
+
+	const maxTermCount = 100
+	if tset, ok := seen[t]; ok {
+		if !tset.complete {
+			return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+		}
+		return tset, nil
+	}
+
+	// Mark the current type as seen to avoid infinite recursion.
+	tset := new(termSet)
+	defer func() {
+		tset.complete = true
+	}()
+	seen[t] = tset
+
+	switch u := t.Underlying().(type) {
+	case *types.Interface:
+		// The term set of an interface is the intersection of the term sets of its
+		// embedded types.
+		tset.terms = allTermlist
+		for i := 0; i < u.NumEmbeddeds(); i++ {
+			embedded := u.EmbeddedType(i)
+			if _, ok := embedded.Underlying().(*types.TypeParam); ok {
+				return nil, fmt.Errorf("invalid embedded type %T", embedded)
+			}
+			tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+			if err != nil {
+				return nil, err
+			}
+			tset.terms = tset.terms.intersect(tset2.terms)
+		}
+	case *types.Union:
+		// The term set of a union is the union of term sets of its terms.
+		tset.terms = nil
+		for i := 0; i < u.Len(); i++ {
+			t := u.Term(i)
+			var terms termlist
+			switch t.Type().Underlying().(type) {
+			case *types.Interface:
+				tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+				if err != nil {
+					return nil, err
+				}
+				terms = tset2.terms
+			case *types.TypeParam, *types.Union:
+				// A stand-alone type parameter or union is not permitted as union
+				// term.
+				return nil, fmt.Errorf("invalid union term %T", t)
+			default:
+				if t.Type() == types.Typ[types.Invalid] {
+					continue
+				}
+				terms = termlist{{t.Tilde(), t.Type()}}
+			}
+			tset.terms = tset.terms.union(terms)
+			if len(tset.terms) > maxTermCount {
+				return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+			}
+		}
+	case *types.TypeParam:
+		panic("unreachable")
+	default:
+		// For all other types, the term set is just a single non-tilde term
+		// holding the type itself.
+		if u != types.Typ[types.Invalid] {
+			tset.terms = termlist{{false, t}}
+		}
+	}
+	return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+	return t.Underlying()
+}
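
The worked example in the comments above (A|B intersected with C normalizing to ~string|int) can be checked with the compiler directly; the program below only restates that example and does not touch the typeparams package:

package main

import "fmt"

type A interface{ ~string | ~[]byte }
type B interface{ int | string }
type C interface{ ~string | ~int }

// The constraint of P normalizes to ~string|int, as described above.
type T[P interface{ A | B; C }] int

type MyString string

func main() {
	var a T[string]   // ok: string is in ~string|int
	var b T[MyString] // ok: underlying type string matches ~string
	var c T[int]      // ok: int is in ~string|int
	fmt.Println(a, b, c)
	// T[[]byte] and T[MyInt] (with type MyInt int) would not compile:
	// ~[]byte is removed by intersecting with C, and ~int reduces to plain int.
}
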
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 00000000..9bc29143
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,169 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/termlist.go
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+	"go/types"
+	"strings"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// termSep is the separator used between individual terms.
+const termSep = " | "
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+	if len(xl) == 0 {
+		return "∅"
+	}
+	var buf strings.Builder
+	for i, x := range xl {
+		if i > 0 {
+			buf.WriteString(termSep)
+		}
+		buf.WriteString(x.String())
+	}
+	return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+	// If there's a non-nil term, the entire list is not empty.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil {
+			return false
+		}
+	}
+	return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+	// If there's a 𝓤 term, the entire list is 𝓤.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil && x.typ == nil {
+			return true
+		}
+	}
+	return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	used := make([]bool, len(xl))
+	var rl termlist
+	for i, xi := range xl {
+		if xi == nil || used[i] {
+			continue
+		}
+		for j := i + 1; j < len(xl); j++ {
+			xj := xl[j]
+			if xj == nil || used[j] {
+				continue
+			}
+			if u1, u2 := xi.union(xj); u2 == nil {
+				// If we encounter a 𝓤 term, the entire list is 𝓤.
+				// Exit early.
+				// (Note that this is not just an optimization;
+				// if we continue, we may end up with a 𝓤 term
+				// and other terms and the result would not be
+				// in normal form.)
+				if u1.typ == nil {
+					return allTermlist
+				}
+				xi = u1
+				used[j] = true // xj is now unioned into xi - ignore it in future iterations
+			}
+		}
+		rl = append(rl, xi)
+	}
+	return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+	return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+	if xl.isEmpty() || yl.isEmpty() {
+		return nil
+	}
+
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	var rl termlist
+	for _, x := range xl {
+		for _, y := range yl {
+			if r := x.intersect(y); r != nil {
+				rl = append(rl, r)
+			}
+		}
+	}
+	return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+	// TODO(gri) this should be more efficient
+	return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+	for _, x := range xl {
+		if x.includes(t) {
+			return true
+		}
+	}
+	return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+	for _, x := range xl {
+		if y.subsetOf(x) {
+			return true
+		}
+	}
+	return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+	if yl.isEmpty() {
+		return xl.isEmpty()
+	}
+
+	// each term x of xl must be a subset of yl
+	for _, x := range xl {
+		if !yl.supersetOf(x) {
+			return false // x is not a subset of yl
+		}
+	}
+	return true
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 00000000..fa758cdc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,172 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/typeterm.go
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+//	 ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
+//	 𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
+//	 T:  &term{false, T}  == {T}                    // set of type T
+//	~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
+type term struct {
+	tilde bool // valid if typ != nil
+	typ   types.Type
+}
+
+func (x *term) String() string {
+	switch {
+	case x == nil:
+		return "∅"
+	case x.typ == nil:
+		return "𝓤"
+	case x.tilde:
+		return "~" + x.typ.String()
+	default:
+		return x.typ.String()
+	}
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return x == y
+	case x.typ == nil || y.typ == nil:
+		return x.typ == y.typ
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+	// easy cases
+	switch {
+	case x == nil && y == nil:
+		return nil, nil // ∅ ∪ ∅ == ∅
+	case x == nil:
+		return y, nil // ∅ ∪ y == y
+	case y == nil:
+		return x, nil // x ∪ ∅ == x
+	case x.typ == nil:
+		return x, nil // 𝓤 ∪ y == 𝓤
+	case y.typ == nil:
+		return y, nil // x ∪ 𝓤 == 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∪ ~t == ~t
+	// ~t ∪  T == ~t
+	//  T ∪ ~t == ~t
+	//  T ∪  T ==  T
+	if x.tilde || !y.tilde {
+		return x, nil
+	}
+	return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+	case x.typ == nil:
+		return y // 𝓤 ∩ y == y
+	case y.typ == nil:
+		return x // x ∩ 𝓤 == x
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return nil // x ∩ y == ∅ if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∩ ~t == ~t
+	// ~t ∩  T ==  T
+	//  T ∩ ~t ==  T
+	//  T ∩  T ==  T
+	if !x.tilde || y.tilde {
+		return x
+	}
+	return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return false // t ∈ ∅ == false
+	case x.typ == nil:
+		return true // t ∈ 𝓤 == true
+	}
+	// ∅ ⊂ x ⊂ 𝓤
+
+	u := t
+	if x.tilde {
+		u = under(u)
+	}
+	return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return true // ∅ ⊆ y == true
+	case y == nil:
+		return false // x ⊆ ∅ == false since x != ∅
+	case y.typ == nil:
+		return true // x ⊆ 𝓤 == true
+	case x.typ == nil:
+		return false // 𝓤 ⊆ y == false since y != 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return false // x ⊆ y == false if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ⊆ ~t == true
+	// ~t ⊆ T == false
+	//  T ⊆ ~t == true
+	//  T ⊆  T == true
+	return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+	if debug && (x.typ == nil || y.typ == nil) {
+		panic("invalid argument(s)")
+	}
+	ux := x.typ
+	if y.tilde {
+		ux = under(ux)
+	}
+	uy := y.typ
+	if x.tilde {
+		uy = under(uy)
+	}
+	return !types.Identical(ux, uy)
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
new file mode 100644
index 00000000..649c82b6
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
@@ -0,0 +1,135 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/ast"
+	"go/types"
+	_ "unsafe"
+)
+
+// CallKind describes the function position of an [*ast.CallExpr].
+type CallKind int
+
+const (
+	CallStatic     CallKind = iota // static call to known function
+	CallInterface                  // dynamic call through an interface method
+	CallDynamic                    // dynamic call of a func value
+	CallBuiltin                    // call to a builtin function
+	CallConversion                 // a conversion (not a call)
+)
+
+var callKindNames = []string{
+	"CallStatic",
+	"CallInterface",
+	"CallDynamic",
+	"CallBuiltin",
+	"CallConversion",
+}
+
+func (k CallKind) String() string {
+	if i := int(k); i >= 0 && i < len(callKindNames) {
+		return callKindNames[i]
+	}
+	return fmt.Sprintf("typeutil.CallKind(%d)", k)
+}
+
+// ClassifyCall classifies the function position of a call expression ([*ast.CallExpr]).
+// It distinguishes among true function calls, calls to builtins, and type conversions,
+// and further classifies function calls as static calls (where the function is known),
+// dynamic interface calls, and other dynamic calls.
+//
+// For the declarations:
+//
+//	func f() {}
+//	func g[T any]() {}
+//	var v func()
+//	var s []func()
+//	type I interface { M() }
+//	var i I
+//
+// ClassifyCall returns the following:
+//
+//	f()           CallStatic
+//	g[int]()      CallStatic
+//	i.M()         CallInterface
+//	min(1, 2)     CallBuiltin
+//	v()           CallDynamic
+//	s[0]()        CallDynamic
+//	int(x)        CallConversion
+//	[]byte("")    CallConversion
+func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
+	if info.Types == nil {
+		panic("ClassifyCall: info.Types is nil")
+	}
+	if info.Types[call.Fun].IsType() {
+		return CallConversion
+	}
+	obj := info.Uses[UsedIdent(info, call.Fun)]
+	// Classify the call by the type of the object, if any.
+	switch obj := obj.(type) {
+	case *types.Builtin:
+		return CallBuiltin
+	case *types.Func:
+		if interfaceMethod(obj) {
+			return CallInterface
+		}
+		return CallStatic
+	default:
+		return CallDynamic
+	}
+}
+
+// UsedIdent returns the identifier such that info.Uses[UsedIdent(info, e)]
+// is the [types.Object] used by e, if any.
+//
+// If e is one of various forms of reference:
+//
+//	f, c, v, T           lexical reference
+//	pkg.X                qualified identifier
+//	f[T] or pkg.F[K,V]   instantiations of the above kinds
+//	expr.f               field or method value selector
+//	T.f                  method expression selector
+//
+// UsedIdent returns the identifier whose associated value in [types.Info.Uses]
+// is the object to which it refers.
+//
+// For the declarations:
+//
+//	func F[T any]() {...}
+//	type I interface { M() }
+//	var (
+//	  x int
+//	  s struct { f  int }
+//	  a []int
+//	  i I
+//	)
+//
+// UsedIdent returns the following:
+//
+//	Expr          UsedIdent
+//	x             x
+//	s.f           f
+//	F[int]        F
+//	i.M           M
+//	I.M           M
+//	min           min
+//	int           int
+//	1             nil
+//	a[0]          nil
+//	[]byte        nil
+//
+// Note: if e is an instantiated function or method, UsedIdent returns
+// the corresponding generic function or method on the generic type.
+func UsedIdent(info *types.Info, e ast.Expr) *ast.Ident {
+	return usedIdent(info, e)
+}
+
+//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
+func usedIdent(info *types.Info, e ast.Expr) *ast.Ident
+
+//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
+func interfaceMethod(f *types.Func) bool
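
ClassifyCall above leans on linknamed helpers from go/types/typeutil. The same five-way split can be sketched with nothing but the public go/types API; the classify function below is a simplified stand-in that resolves only plain identifiers and selector expressions, not the vendored implementation:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type I interface{ M() }

func f() {}

func use(i I, v func(), x int) {
	f()            // static
	i.M()          // interface
	v()            // dynamic
	_ = len("ab")  // builtin
	_ = int(x)     // conversion
}`

// classify is a reduced version of ClassifyCall.
func classify(info *types.Info, call *ast.CallExpr) string {
	if info.Types[call.Fun].IsType() {
		return "conversion"
	}
	var id *ast.Ident
	switch fun := call.Fun.(type) {
	case *ast.Ident:
		id = fun
	case *ast.SelectorExpr:
		id = fun.Sel
	}
	switch obj := info.Uses[id].(type) {
	case *types.Builtin:
		return "builtin"
	case *types.Func:
		sig := obj.Type().(*types.Signature)
		if recv := sig.Recv(); recv != nil && types.IsInterface(recv.Type()) {
			return "interface"
		}
		return "static"
	}
	return "dynamic"
}

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Types: map[ast.Expr]types.TypeAndValue{},
		Uses:  map[*ast.Ident]types.Object{},
	}
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			fmt.Printf("%-10s %s\n", fset.Position(call.Pos()), classify(info, call))
		}
		return true
	})
}
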
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go
new file mode 100644
index 00000000..4957f021
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/types"
+
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+// ForEachElement calls f for type T and each type reachable from its
+// type through reflection. It does this by recursively stripping off
+// type constructors; in addition, for each named type N, the type *N
+// is added to the result as it may have additional methods.
+//
+// The caller must provide an initially empty set used to de-duplicate
+// identical types, potentially across multiple calls to ForEachElement.
+// (Its final value holds all the elements seen, matching the arguments
+// passed to f.)
+//
+// TODO(adonovan): share/harmonize with go/callgraph/rta.
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
+	var visit func(T types.Type, skip bool)
+	visit = func(T types.Type, skip bool) {
+		if !skip {
+			if seen, _ := rtypes.Set(T, true).(bool); seen {
+				return // de-dup
+			}
+
+			f(T) // notify caller of new element type
+		}
+
+		// Recursion over signatures of each method.
+		tmset := msets.MethodSet(T)
+		for i := 0; i < tmset.Len(); i++ {
+			sig := tmset.At(i).Type().(*types.Signature)
+			// It is tempting to call visit(sig, false)
+			// but, as noted in golang.org/cl/65450043,
+			// the Signature.Recv field is ignored by
+			// types.Identical and typeutil.Map, which
+			// is confusing at best.
+			//
+			// More importantly, the true signature rtype
+			// reachable from a method using reflection
+			// has no receiver but an extra ordinary parameter.
+			// For the Read method of io.Reader we want:
+			//   func(Reader, []byte) (int, error)
+			// but here sig is:
+			//   func([]byte) (int, error)
+			// with .Recv = Reader (though it is hard to
+			// notice because it doesn't affect Signature.String
+			// or types.Identical).
+			//
+			// TODO(adonovan): construct and visit the correct
+			// non-method signature with an extra parameter
+			// (though since unnamed func types have no methods
+			// there is essentially no actual demand for this).
+			//
+			// TODO(adonovan): document whether or not it is
+			// safe to skip non-exported methods (as RTA does).
+			visit(sig.Params(), true)  // skip the Tuple
+			visit(sig.Results(), true) // skip the Tuple
+		}
+
+		switch T := T.(type) {
+		case *types.Alias:
+			visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
+
+		case *types.Basic:
+			// nop
+
+		case *types.Interface:
+			// nop---handled by recursion over method set.
+
+		case *types.Pointer:
+			visit(T.Elem(), false)
+
+		case *types.Slice:
+			visit(T.Elem(), false)
+
+		case *types.Chan:
+			visit(T.Elem(), false)
+
+		case *types.Map:
+			visit(T.Key(), false)
+			visit(T.Elem(), false)
+
+		case *types.Signature:
+			if T.Recv() != nil {
+				panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
+			}
+			visit(T.Params(), true)  // skip the Tuple
+			visit(T.Results(), true) // skip the Tuple
+
+		case *types.Named:
+			// A pointer-to-named type can be derived from a named
+			// type via reflection.  It may have methods too.
+			visit(types.NewPointer(T), false)
+
+			// Consider 'type T struct{S}' where S has methods.
+			// Reflection provides no way to get from T to struct{S},
+			// only to S, so the method set of struct{S} is unwanted,
+			// so set 'skip' flag during recursion.
+			visit(T.Underlying(), true) // skip the unnamed type
+
+		case *types.Array:
+			visit(T.Elem(), false)
+
+		case *types.Struct:
+			for i, n := 0, T.NumFields(); i < n; i++ {
+				// TODO(adonovan): document whether or not
+				// it is safe to skip non-exported fields.
+				visit(T.Field(i).Type(), false)
+			}
+
+		case *types.Tuple:
+			for i, n := 0, T.Len(); i < n; i++ {
+				visit(T.At(i).Type(), false)
+			}
+
+		case *types.TypeParam, *types.Union:
+			// forEachReachable must not be called on parameterized types.
+			panic(T)
+
+		default:
+			panic(T)
+		}
+	}
+	visit(T, false)
+}
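
ForEachElement uses typeutil.Map as a set keyed by type identity to de-duplicate the types it visits. A tiny sketch of that pattern with the public golang.org/x/tools/go/types/typeutil package (assumed to be available on the module path):

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var seen typeutil.Map // keyed by type identity; the zero value is ready to use

	// mark records t and reports whether it was new, the same de-dup step
	// ForEachElement performs before notifying its callback.
	mark := func(t types.Type) bool {
		prev, _ := seen.Set(t, true).(bool) // Set returns the previous value, if any
		return !prev
	}

	intSlice := types.NewSlice(types.Typ[types.Int])
	fmt.Println(mark(types.Typ[types.Int]))                 // true
	fmt.Println(mark(intSlice))                             // true
	fmt.Println(mark(types.NewSlice(types.Typ[types.Int]))) // false: identical to intSlice
	fmt.Println(seen.Len())                                 // 2
}
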
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 834e0538..235a6def 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -838,7 +838,7 @@ const (
 	// InvalidCap occurs when an argument to the cap built-in function is not of
 	// supported type.
 	//
-	// See https://golang.org/ref/spec#Lengthand_capacity for information on
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
 	// which underlying types are supported as arguments to cap and len.
 	//
 	// Example:
@@ -859,7 +859,7 @@ const (
 	// InvalidCopy occurs when the arguments are not of slice type or do not
 	// have compatible type.
 	//
-	// See https://golang.org/ref/spec#Appendingand_copying_slices for more
+	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
 	// information on the type requirements for the copy built-in.
 	//
 	// Example:
@@ -897,7 +897,7 @@ const (
 	// InvalidLen occurs when an argument to the len built-in function is not of
 	// supported type.
 	//
-	// See https://golang.org/ref/spec#Lengthand_capacity for information on
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
 	// which underlying types are supported as arguments to cap and len.
 	//
 	// Example:
@@ -914,7 +914,7 @@ const (
 
 	// InvalidMake occurs when make is called with an unsupported type argument.
 	//
-	// See https://golang.org/ref/spec#Makingslices_maps_and_channels for
+	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
 	// information on the types that may be created using make.
 	//
 	// Example:
@@ -966,7 +966,7 @@ const (
 	//  var _ = string(x)
 	InvalidConversion
 
-	// InvalidUntypedConversion occurs when an there is no valid implicit
+	// InvalidUntypedConversion occurs when there is no valid implicit
 	// conversion from an untyped value satisfying the type constraints of the
 	// context in which it is used.
 	//
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
new file mode 100644
index 00000000..b64f714e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -0,0 +1,46 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"go/ast"
+	"go/types"
+	"strconv"
+)
+
+// FileQualifier returns a [types.Qualifier] function that qualifies
+// imported symbols appropriately based on the import environment of a given
+// file.
+// If the same package is imported multiple times, the last appearance is
+// recorded.
+func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
+	// Construct mapping of import paths to their defined names.
+	// It is only necessary to look at renaming imports.
+	imports := make(map[string]string)
+	for _, imp := range f.Imports {
+		if imp.Name != nil && imp.Name.Name != "_" {
+			path, _ := strconv.Unquote(imp.Path.Value)
+			imports[path] = imp.Name.Name
+		}
+	}
+
+	// Define qualifier to replace full package paths with names of the imports.
+	return func(p *types.Package) string {
+		if p == nil || p == pkg {
+			return ""
+		}
+
+		if name, ok := imports[p.Path()]; ok {
+			if name == "." {
+				return ""
+			} else {
+				return name
+			}
+		}
+
+		// If there is no local renaming, fall back to the package name.
+		return p.Name()
+	}
+}
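
FileQualifier above derives its mapping from a file's import declarations. The sketch below hard-codes that mapping to show the effect a qualifier has on types.TypeString; the package path and local import name are made up for illustration:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// A named type foo.Bar from package "example.com/foo".
	pkg := types.NewPackage("example.com/foo", "foo")
	obj := types.NewTypeName(token.NoPos, pkg, "Bar", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)

	// Pretend the current file imports it as `f "example.com/foo"`.
	imports := map[string]string{"example.com/foo": "f"}
	qual := func(p *types.Package) string {
		if name, ok := imports[p.Path()]; ok {
			return name
		}
		return p.Name()
	}

	fmt.Println(types.TypeString(named, qual))                    // f.Bar
	fmt.Println(types.TypeString(named, nil))                     // example.com/foo.Bar
	fmt.Println(types.TypeString(types.NewPointer(named), qual))  // *f.Bar
}
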
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
index fea7c8b7..8352ea76 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -6,20 +6,21 @@ package typesinternal
 
 import (
 	"go/types"
-
-	"golang.org/x/tools/internal/aliases"
 )
 
 // ReceiverNamed returns the named type (if any) associated with the
 // type of recv, which may be of the form N or *N, or aliases thereof.
 // It also reports whether a Pointer was present.
+//
+// The named result may be nil if recv is from a method on an
+// anonymous interface or struct type, or in ill-typed code.
 func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
 	t := recv.Type()
-	if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
 		isPtr = true
 		t = ptr.Elem()
 	}
-	named, _ = aliases.Unalias(t).(*types.Named)
+	named, _ = types.Unalias(t).(*types.Named)
 	return
 }
 
@@ -36,7 +37,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
 // indirection from the type, regardless of named types (analogous to
 // a LOAD instruction).
 func Unpointer(t types.Type) types.Type {
-	if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
 		return ptr.Elem()
 	}
 	return t
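
The change above swaps the aliases.Unalias shim for types.Unalias, available since Go 1.22. A short sketch of the unwrapping ReceiverNamed performs, using only public constructors (Counter is an invented example type, and the helper takes the receiver's type directly rather than a *types.Var):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

// receiverNamed mirrors ReceiverNamed: strip one pointer, then look for a named type.
func receiverNamed(t types.Type) (isPtr bool, named *types.Named) {
	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
		isPtr = true
		t = ptr.Elem()
	}
	named, _ = types.Unalias(t).(*types.Named)
	return
}

func main() {
	obj := types.NewTypeName(token.NoPos, nil, "Counter", nil)
	counter := types.NewNamed(obj, types.Typ[types.Int], nil)

	isPtr, named := receiverNamed(types.NewPointer(counter))
	fmt.Println(isPtr, named.Obj().Name()) // true Counter

	isPtr, named = receiverNamed(types.NewSlice(counter))
	fmt.Println(isPtr, named == nil) // false true
}
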
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index 7c77c2fb..cc244689 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -7,10 +7,13 @@
 package typesinternal
 
 import (
+	"go/ast"
 	"go/token"
 	"go/types"
 	"reflect"
 	"unsafe"
+
+	"golang.org/x/tools/internal/aliases"
 )
 
 func SetUsesCgo(conf *types.Config) bool {
@@ -30,12 +33,14 @@ func SetUsesCgo(conf *types.Config) bool {
 	return true
 }
 
-// ReadGo116ErrorData extracts additional information from types.Error values
+// ErrorCodeStartEnd extracts additional information from types.Error values
 // generated by Go version 1.16 and later: the error code, start position, and
 // end position. If all positions are valid, start <= err.Pos <= end.
 //
 // If the data could not be read, the final result parameter will be false.
-func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
+//
+// TODO(adonovan): eliminate start/end when proposal #71803 is accepted.
+func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
 	var data [3]int
 	// By coincidence all of these fields are ints, which simplifies things.
 	v := reflect.ValueOf(err)
@@ -48,3 +53,92 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
 	}
 	return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
 }
+
+// NameRelativeTo returns a types.Qualifier that qualifies members of
+// all packages other than pkg, using only the package name.
+// (By contrast, [types.RelativeTo] uses the complete package path,
+// which is often excessive.)
+//
+// If pkg is nil, it is equivalent to [*types.Package.Name].
+func NameRelativeTo(pkg *types.Package) types.Qualifier {
+	return func(other *types.Package) string {
+		if pkg != nil && pkg == other {
+			return "" // same package; unqualified
+		}
+		return other.Name()
+	}
+}
+
+// A NamedOrAlias is a [types.Type] that is named (as
+// defined by the spec) and capable of bearing type parameters: it
+// abstracts aliases ([types.Alias]) and defined types
+// ([types.Named]).
+//
+// Every type declared by an explicit "type" declaration is a
+// NamedOrAlias. (Built-in type symbols may additionally
+// have type [types.Basic], which is not a NamedOrAlias,
+// though the spec regards them as "named".)
+//
+// NamedOrAlias cannot expose the Origin method, because
+// [types.Alias.Origin] and [types.Named.Origin] have different
+// (covariant) result types; use [Origin] instead.
+type NamedOrAlias interface {
+	types.Type
+	Obj() *types.TypeName
+	// TODO(hxjiang): add method TypeArgs() *types.TypeList after we stop supporting go1.22.
+}
+
+// TypeParams is a light shim around t.TypeParams().
+// (go/types.Alias).TypeParams requires >= 1.23.
+func TypeParams(t NamedOrAlias) *types.TypeParamList {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.TypeParams(t)
+	case *types.Named:
+		return t.TypeParams()
+	}
+	return nil
+}
+
+// TypeArgs is a light shim around t.TypeArgs().
+// (go/types.Alias).TypeArgs requires >= 1.23.
+func TypeArgs(t NamedOrAlias) *types.TypeList {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.TypeArgs(t)
+	case *types.Named:
+		return t.TypeArgs()
+	}
+	return nil
+}
+
+// Origin returns the generic type of the Named or Alias type t if it
+// is instantiated, otherwise it returns t.
+func Origin(t NamedOrAlias) NamedOrAlias {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.Origin(t)
+	case *types.Named:
+		return t.Origin()
+	}
+	return t
+}
+
+// IsPackageLevel reports whether obj is a package-level symbol.
+func IsPackageLevel(obj types.Object) bool {
+	return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
+}
+
+// NewTypesInfo returns a *types.Info with all maps populated.
+func NewTypesInfo() *types.Info {
+	return &types.Info{
+		Types:        map[ast.Expr]types.TypeAndValue{},
+		Instances:    map[*ast.Ident]types.Instance{},
+		Defs:         map[*ast.Ident]types.Object{},
+		Uses:         map[*ast.Ident]types.Object{},
+		Implicits:    map[ast.Node]types.Object{},
+		Selections:   map[*ast.SelectorExpr]*types.Selection{},
+		Scopes:       map[ast.Node]*types.Scope{},
+		FileVersions: map[*ast.File]string{},
+	}
+}
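
NewTypesInfo allocates every Info map so that a single Check call records all the facts the type checker can produce. A compact usage sketch that builds the same struct literal inline rather than importing the internal package:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p
func double(x int) int { return x * 2 }
var y = double(21)
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Same shape as NewTypesInfo: allocate every map up front.
	info := &types.Info{
		Types:        map[ast.Expr]types.TypeAndValue{},
		Instances:    map[*ast.Ident]types.Instance{},
		Defs:         map[*ast.Ident]types.Object{},
		Uses:         map[*ast.Ident]types.Object{},
		Implicits:    map[ast.Node]types.Object{},
		Selections:   map[*ast.SelectorExpr]*types.Selection{},
		Scopes:       map[ast.Node]*types.Scope{},
		FileVersions: map[*ast.File]string{},
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info)
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg.Name(), len(info.Defs) > 0, len(info.Uses) > 0) // p true true
}
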
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
new file mode 100644
index 00000000..e5da0495
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
@@ -0,0 +1,40 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
+// this API that actually does something.
+
+import "go/types"
+
+type VarKind uint8
+
+const (
+	_          VarKind = iota // (not meaningful)
+	PackageVar                // a package-level variable
+	LocalVar                  // a local variable
+	RecvVar                   // a method receiver variable
+	ParamVar                  // a function parameter variable
+	ResultVar                 // a function result variable
+	FieldVar                  // a struct field
+)
+
+func (kind VarKind) String() string {
+	return [...]string{
+		0:          "VarKind(0)",
+		PackageVar: "PackageVar",
+		LocalVar:   "LocalVar",
+		RecvVar:    "RecvVar",
+		ParamVar:   "ParamVar",
+		ResultVar:  "ResultVar",
+		FieldVar:   "FieldVar",
+	}[kind]
+}
+
+// GetVarKind returns an invalid VarKind.
+func GetVarKind(v *types.Var) VarKind { return 0 }
+
+// SetVarKind has no effect.
+func SetVarKind(v *types.Var, kind VarKind) {}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 00000000..d272949c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,392 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strings"
+)
+
+// ZeroString returns the string representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroString may return a partially correct
+// string representation. The caller should use the returned isValid boolean
+// to determine the validity of the expression.
+//
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+//
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// References to named types are qualified by an appropriate (optional)
+// qualifier function.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+//
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return "false", true
+		case t.Info()&types.IsNumeric != 0:
+			return "0", true
+		case t.Info()&types.IsString != 0:
+			return `""`, true
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return "nil", true
+		case t.Kind() == types.Invalid:
+			return "invalid", false
+		default:
+			panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+		return "nil", true
+
+	case *types.Interface:
+		if !t.IsMethodSet() {
+			return "invalid", false
+		}
+		return "nil", true
+
+	case *types.Named:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return types.TypeString(t, qual) + "{}", true
+		default:
+			return ZeroString(under, qual)
+		}
+
+	case *types.Alias:
+		switch t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return types.TypeString(t, qual) + "{}", true
+		default:
+			// A type parameter can have an alias, but an alias type's
+			// underlying type can never be a type parameter.
+			// Use types.Unalias to preserve the type parameter information
+			// instead of calling Underlying(), which would go right through
+			// to the underlying type of the type parameter (always an interface).
+			return ZeroString(types.Unalias(t), qual)
+		}
+
+	case *types.Array, *types.Struct:
+		return types.TypeString(t, qual) + "{}", true
+
+	case *types.TypeParam:
+		// Assumes func new is not shadowed.
+		return "*new(" + types.TypeString(t, qual) + ")", true
+
+	case *types.Tuple:
+		// Tuples are not normal values.
+		// We currently format it as "(t[0], ..., t[n])". Could be something else.
+		isValid := true
+		components := make([]string, t.Len())
+		for i := 0; i < t.Len(); i++ {
+			comp, ok := ZeroString(t.At(i).Type(), qual)
+
+			components[i] = comp
+			isValid = isValid && ok
+		}
+		return "(" + strings.Join(components, ", ") + ")", isValid
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
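
ZeroString above is exhaustive over go/types; the reduced sketch below covers only a few cases to show the intended use (building a right-hand side for an assignment of the zero value). The package path and type name are made up for illustration:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

// zeroString is a much-reduced sketch of ZeroString covering only basic types,
// nil-able types, and named struct types.
func zeroString(t types.Type, qual types.Qualifier) string {
	switch t := t.(type) {
	case *types.Basic:
		switch {
		case t.Info()&types.IsBoolean != 0:
			return "false"
		case t.Info()&types.IsNumeric != 0:
			return "0"
		case t.Info()&types.IsString != 0:
			return `""`
		}
	case *types.Pointer, *types.Slice, *types.Map, *types.Chan, *types.Signature:
		return "nil"
	case *types.Named:
		if _, ok := t.Underlying().(*types.Struct); ok {
			return types.TypeString(t, qual) + "{}"
		}
		return zeroString(t.Underlying(), qual)
	}
	return "<unhandled>"
}

func main() {
	pkg := types.NewPackage("example.com/m", "m")
	obj := types.NewTypeName(token.NoPos, pkg, "Point", nil)
	point := types.NewNamed(obj, types.NewStruct(nil, nil), nil)

	fmt.Println(zeroString(types.Typ[types.Int], nil))                // 0
	fmt.Println(zeroString(types.NewSlice(types.Typ[types.Int]), nil)) // nil
	fmt.Println(zeroString(point, nil))                               // example.com/m.Point{}
}
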
+
+// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
+// representation. The caller should use the returned isValid boolean to determine
+// the validity of the expression.
+//
+// This function is designed for types suitable for variables and should not be
+// used with Tuple or Union types. References to named types are qualified by an
+// appropriate (optional) qualifier function.
+//
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return &ast.Ident{Name: "false"}, true
+		case t.Info()&types.IsNumeric != 0:
+			return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
+		case t.Info()&types.IsString != 0:
+			return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return ast.NewIdent("nil"), true
+		case t.Kind() == types.Invalid:
+			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+		default:
+			panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+		return ast.NewIdent("nil"), true
+
+	case *types.Interface:
+		if !t.IsMethodSet() {
+			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+		}
+		return ast.NewIdent("nil"), true
+
+	case *types.Named:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return &ast.CompositeLit{
+				Type: TypeExpr(t, qual),
+			}, true
+		default:
+			return ZeroExpr(under, qual)
+		}
+
+	case *types.Alias:
+		switch t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return &ast.CompositeLit{
+				Type: TypeExpr(t, qual),
+			}, true
+		default:
+			return ZeroExpr(types.Unalias(t), qual)
+		}
+
+	case *types.Array, *types.Struct:
+		return &ast.CompositeLit{
+			Type: TypeExpr(t, qual),
+		}, true
+
+	case *types.TypeParam:
+		return &ast.StarExpr{ // *new(T)
+			X: &ast.CallExpr{
+				// Assumes func new is not shadowed.
+				Fun: ast.NewIdent("new"),
+				Args: []ast.Expr{
+					ast.NewIdent(t.Obj().Name()),
+				},
+			},
+		}, true
+
+	case *types.Tuple:
+		// Unlike ZeroString, there is no ast.Expr that can express a tuple as
+		// "(t[0], ..., t[n])".
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// IsZeroExpr uses simple syntactic heuristics to report whether expr
+// is an obvious zero value, such as 0, "", nil, or false.
+// It cannot do better without type information.
+func IsZeroExpr(expr ast.Expr) bool {
+	switch e := expr.(type) {
+	case *ast.BasicLit:
+		return e.Value == "0" || e.Value == `""`
+	case *ast.Ident:
+		return e.Name == "nil" || e.Name == "false"
+	default:
+		return false
+	}
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// are qualified by an appropriate (optional) qualifier function.
+// It may panic for types such as Tuple or Union.
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch t.Kind() {
+		case types.UnsafePointer:
+			return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
+		default:
+			return ast.NewIdent(t.Name())
+		}
+
+	case *types.Pointer:
+		return &ast.UnaryExpr{
+			Op: token.MUL,
+			X:  TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Array:
+		return &ast.ArrayType{
+			Len: &ast.BasicLit{
+				Kind:  token.INT,
+				Value: fmt.Sprintf("%d", t.Len()),
+			},
+			Elt: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Slice:
+		return &ast.ArrayType{
+			Elt: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Map:
+		return &ast.MapType{
+			Key:   TypeExpr(t.Key(), qual),
+			Value: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Chan:
+		dir := ast.ChanDir(t.Dir())
+		if t.Dir() == types.SendRecv {
+			dir = ast.SEND | ast.RECV
+		}
+		return &ast.ChanType{
+			Dir:   dir,
+			Value: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Signature:
+		var params []*ast.Field
+		for i := 0; i < t.Params().Len(); i++ {
+			params = append(params, &ast.Field{
+				Type: TypeExpr(t.Params().At(i).Type(), qual),
+				Names: []*ast.Ident{
+					{
+						Name: t.Params().At(i).Name(),
+					},
+				},
+			})
+		}
+		if t.Variadic() {
+			last := params[len(params)-1]
+			last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+		}
+		var returns []*ast.Field
+		for i := 0; i < t.Results().Len(); i++ {
+			returns = append(returns, &ast.Field{
+				Type: TypeExpr(t.Results().At(i).Type(), qual),
+			})
+		}
+		return &ast.FuncType{
+			Params: &ast.FieldList{
+				List: params,
+			},
+			Results: &ast.FieldList{
+				List: returns,
+			},
+		}
+
+	case *types.TypeParam:
+		pkgName := qual(t.Obj().Pkg())
+		if pkgName == "" || t.Obj().Pkg() == nil {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		return &ast.SelectorExpr{
+			X:   ast.NewIdent(pkgName),
+			Sel: ast.NewIdent(t.Obj().Name()),
+		}
+
+	// types.TypeParam also implements interface NamedOrAlias. To differentiate,
+	// case TypeParam needs to be present before case NamedOrAlias.
+	// TODO(hxjiang): remove this comment once TypeArgs() is added to interface
+	// NamedOrAlias.
+	case NamedOrAlias:
+		var expr ast.Expr = ast.NewIdent(t.Obj().Name())
+		if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
+			expr = &ast.SelectorExpr{
+				X:   ast.NewIdent(pkgName),
+				Sel: expr.(*ast.Ident),
+			}
+		}
+
+		// TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
+		// typesinternal.NamedOrAlias.
+		if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
+			if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
+				var indices []ast.Expr
+				for i := range typeArgs.Len() {
+					indices = append(indices, TypeExpr(typeArgs.At(i), qual))
+				}
+				expr = &ast.IndexListExpr{
+					X:       expr,
+					Indices: indices,
+				}
+			}
+		}
+
+		return expr
+
+	case *types.Struct:
+		return ast.NewIdent(t.String())
+
+	case *types.Interface:
+		return ast.NewIdent(t.String())
+
+	case *types.Union:
+		if t.Len() == 0 {
+			panic("Union type should have at least one term")
+		}
+		// As in go/ast, the returned expression puts the last term in the
+		// Y field at the topmost level of the BinaryExpr.
+		// For a union type "float32 | float64 | int64", the structure looks
+		// similar to:
+		// {
+		// 	X: {
+		// 		X: float32,
+		// 		Op: |
+		// 		Y: float64,
+		// 	}
+		// 	Op: |,
+		// 	Y: int64,
+		// }
+		var union ast.Expr
+		for i := range t.Len() {
+			term := t.Term(i)
+			termExpr := TypeExpr(term.Type(), qual)
+			if term.Tilde() {
+				termExpr = &ast.UnaryExpr{
+					Op: token.TILDE,
+					X:  termExpr,
+				}
+			}
+			if i == 0 {
+				union = termExpr
+			} else {
+				union = &ast.BinaryExpr{
+					X:  union,
+					Op: token.OR,
+					Y:  termExpr,
+				}
+			}
+		}
+		return union
+
+	case *types.Tuple:
+		panic("invalid input type types.Tuple")
+
+	default:
+		panic("unreachable")
+	}
+}
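
The helpers added above (ZeroString, ZeroExpr, TypeExpr) all follow the same pattern: switch on the go/types representation of a type and emit a literal or expression denoting its zero value or its syntax. As a rough illustration of how such a helper is driven, the sketch below re-derives the simplest ZeroString cases directly on top of go/types. It is illustrative only: typesinternal is internal to x/tools and cannot be imported from this repository, and the tiny source file, variable names, and zeroString function are hypothetical stand-ins, not part of this change.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type Point struct{ X, Y int }

var (
	a int
	b string
	c *Point
	d Point
	e []byte
)
`

// zeroString mirrors the shape of the vendored ZeroString for a few easy cases.
func zeroString(t types.Type, qual types.Qualifier) string {
	switch u := t.Underlying().(type) {
	case *types.Basic:
		switch {
		case u.Info()&types.IsBoolean != 0:
			return "false"
		case u.Info()&types.IsNumeric != 0:
			return "0"
		case u.Info()&types.IsString != 0:
			return `""`
		}
	case *types.Pointer, *types.Slice, *types.Map, *types.Chan, *types.Signature, *types.Interface:
		return "nil"
	case *types.Struct, *types.Array:
		// Struct and array types print as T{}, using the original (possibly named) type.
		return types.TypeString(t, qual) + "{}"
	}
	return "/* unsupported in this sketch */"
}

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	qual := types.RelativeTo(pkg) // omit the "p." qualifier inside package p
	scope := pkg.Scope()
	for _, name := range scope.Names() {
		if v, ok := scope.Lookup(name).(*types.Var); ok {
			fmt.Printf("var %s %s = %s\n", name, types.TypeString(v.Type(), qual), zeroString(v.Type(), qual))
		}
	}
}
```
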
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
deleted file mode 100644
index 377bf7a5..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-// toolchain is maximum version (<1.22) that the go toolchain used
-// to build the current tool is known to support.
-//
-// When a tool is built with >=1.22, the value of toolchain is unused.
-//
-// x/tools does not support building with go <1.18. So we take this
-// as the minimum possible maximum.
-var toolchain string = Go1_18
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
deleted file mode 100644
index f65beed9..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19
-// +build go1.19
-
-package versions
-
-func init() {
-	if Compare(toolchain, Go1_19) < 0 {
-		toolchain = Go1_19
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
deleted file mode 100644
index b7ef216d..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-// +build go1.21
-
-package versions
-
-func init() {
-	if Compare(toolchain, Go1_21) < 0 {
-		toolchain = Go1_21
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
index 562eef21..0fc10ce4 100644
--- a/vendor/golang.org/x/tools/internal/versions/types.go
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -5,15 +5,29 @@
 package versions
 
 import (
+	"go/ast"
 	"go/types"
 )
 
-// GoVersion returns the Go version of the type package.
-// It returns zero if no version can be determined.
-func GoVersion(pkg *types.Package) string {
-	// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
-	if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
-		return pkg.GoVersion()
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In tools built with Go >= 1.22, the Go version of a file
+	// follow a cascades of sources:
+	// follows a cascade of sources:
+	// 1) types.Info.FileVersions, which follows the cascade:
+	//   1.b) the package version (types.Config.GoVersion), or
+	// 2) is some unknown Future version.
+	//
+	// File versions require a valid package version to be provided to types
+	// in Config.GoVersion. Config.GoVersion is either from the package's module
+	// or the toolchain (go run). This value should be provided by go/packages
+	// or unitchecker.Config.GoVersion.
+	if v := info.FileVersions[file]; IsValid(v) {
+		return v
 	}
-	return ""
+	// Note: we could instead return runtime.Version() [if valid].
+	// This would act as a max version on what a tool can support.
+	return Future
 }
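
The replacement FileVersion above keys off types.Info.FileVersions, which the type checker populates when tools are built with Go 1.22 or newer; the toolchain_*.go and types_go1xx.go files deleted in this change existed only to approximate that on pre-1.22 toolchains. A minimal sketch of the underlying mechanism, using only the standard go/types API (the file contents and version numbers are made up for illustration):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

// A file whose build constraint pins it to an older language version than the package.
const src = `//go:build go1.21

package p
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	conf := types.Config{GoVersion: "go1.22"} // package version, normally taken from the module
	info := &types.Info{FileVersions: make(map[*ast.File]string)}
	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}
	// With a Go 1.22+ toolchain this reports the per-file version, e.g. "go1.21" here;
	// the vendored FileVersion falls back to an unknown Future version when this entry is not valid.
	fmt.Println(info.FileVersions[f])
}
```
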
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
deleted file mode 100644
index b4345d33..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a language version (<=1.21) derived from runtime.Version()
-// or an unknown future version.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
-	// available. We use a go version derived from the toolchain used to
-	// compile the tool by default.
-	// This will be <= go1.21. We take this as the maximum version that
-	// this tool can support.
-	//
-	// There are no features currently in x/tools that need to tell fine grained
-	// differences for versions <1.22.
-	return toolchain
-}
-
-// InitFileVersions is a noop when compiled with this Go version.
-func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
deleted file mode 100644
index aac5db62..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.22
-// +build go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a file's Go version.
-// The reported version is an unknown Future version if a
-// version cannot be determined.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In tools built with Go >= 1.22, the Go version of a file
-	// follow a cascades of sources:
-	// 1) types.Info.FileVersion, which follows the cascade:
-	//   1.a) file version (ast.File.GoVersion),
-	//   1.b) the package version (types.Config.GoVersion), or
-	// 2) is some unknown Future version.
-	//
-	// File versions require a valid package version to be provided to types
-	// in Config.GoVersion. Config.GoVersion is either from the package's module
-	// or the toolchain (go run). This value should be provided by go/packages
-	// or unitchecker.Config.GoVersion.
-	if v := info.FileVersions[file]; IsValid(v) {
-		return v
-	}
-	// Note: we could instead return runtime.Version() [if valid].
-	// This would act as a max version on what a tool can support.
-	return Future
-}
-
-// InitFileVersions initializes info to record Go versions for Go files.
-func InitFileVersions(info *types.Info) {
-	info.FileVersions = make(map[*ast.File]string)
-}
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/vendor/google.golang.org/appengine/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
deleted file mode 100644
index 0569f5dd..00000000
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ /dev/null
@@ -1,653 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-//go:build !appengine
-// +build !appengine
-
-package internal
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/golang/protobuf/proto"
-
-	basepb "google.golang.org/appengine/internal/base"
-	logpb "google.golang.org/appengine/internal/log"
-	remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-const (
-	apiPath = "/rpc_http"
-)
-
-var (
-	// Incoming headers.
-	ticketHeader       = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
-	dapperHeader       = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
-	traceHeader        = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
-	curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
-	userIPHeader       = http.CanonicalHeaderKey("X-AppEngine-User-IP")
-	remoteAddrHeader   = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
-	devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id")
-
-	// Outgoing headers.
-	apiEndpointHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
-	apiEndpointHeaderValue = []string{"app-engine-apis"}
-	apiMethodHeader        = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
-	apiMethodHeaderValue   = []string{"/VMRemoteAPI.CallRemoteAPI"}
-	apiDeadlineHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
-	apiContentType         = http.CanonicalHeaderKey("Content-Type")
-	apiContentTypeValue    = []string{"application/octet-stream"}
-	logFlushHeader         = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
-
-	apiHTTPClient = &http.Client{
-		Transport: &http.Transport{
-			Proxy:               http.ProxyFromEnvironment,
-			Dial:                limitDial,
-			MaxIdleConns:        1000,
-			MaxIdleConnsPerHost: 10000,
-			IdleConnTimeout:     90 * time.Second,
-		},
-	}
-)
-
-func apiURL(ctx context.Context) *url.URL {
-	host, port := "appengine.googleapis.internal", "10001"
-	if h := os.Getenv("API_HOST"); h != "" {
-		host = h
-	}
-	if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil {
-		host = hostOverride.(string)
-	}
-	if p := os.Getenv("API_PORT"); p != "" {
-		port = p
-	}
-	if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil {
-		port = portOverride.(string)
-	}
-	return &url.URL{
-		Scheme: "http",
-		Host:   host + ":" + port,
-		Path:   apiPath,
-	}
-}
-
-// Middleware wraps an http handler so that it can make GAE API calls
-func Middleware(next http.Handler) http.Handler {
-	return handleHTTPMiddleware(executeRequestSafelyMiddleware(next))
-}
-
-func handleHTTPMiddleware(next http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		c := &aeContext{
-			req:       r,
-			outHeader: w.Header(),
-		}
-		r = r.WithContext(withContext(r.Context(), c))
-		c.req = r
-
-		stopFlushing := make(chan int)
-
-		// Patch up RemoteAddr so it looks reasonable.
-		if addr := r.Header.Get(userIPHeader); addr != "" {
-			r.RemoteAddr = addr
-		} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
-			r.RemoteAddr = addr
-		} else {
-			// Should not normally reach here, but pick a sensible default anyway.
-			r.RemoteAddr = "127.0.0.1"
-		}
-		// The address in the headers will most likely be of these forms:
-		//	123.123.123.123
-		//	2001:db8::1
-		// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
-		if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
-			// Assume the remote address is only a host; add a default port.
-			r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
-		}
-
-		if logToLogservice() {
-			// Start goroutine responsible for flushing app logs.
-			// This is done after adding c to ctx.m (and stopped before removing it)
-			// because flushing logs requires making an API call.
-			go c.logFlusher(stopFlushing)
-		}
-
-		next.ServeHTTP(c, r)
-		c.outHeader = nil // make sure header changes aren't respected any more
-
-		flushed := make(chan struct{})
-		if logToLogservice() {
-			stopFlushing <- 1 // any logging beyond this point will be dropped
-
-			// Flush any pending logs asynchronously.
-			c.pendingLogs.Lock()
-			flushes := c.pendingLogs.flushes
-			if len(c.pendingLogs.lines) > 0 {
-				flushes++
-			}
-			c.pendingLogs.Unlock()
-			go func() {
-				defer close(flushed)
-				// Force a log flush, because with very short requests we
-				// may not ever flush logs.
-				c.flushLog(true)
-			}()
-			w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
-		}
-
-		// Avoid nil Write call if c.Write is never called.
-		if c.outCode != 0 {
-			w.WriteHeader(c.outCode)
-		}
-		if c.outBody != nil {
-			w.Write(c.outBody)
-		}
-		if logToLogservice() {
-			// Wait for the last flush to complete before returning,
-			// otherwise the security ticket will not be valid.
-			<-flushed
-		}
-	})
-}
-
-func executeRequestSafelyMiddleware(next http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		defer func() {
-			if x := recover(); x != nil {
-				c := w.(*aeContext)
-				logf(c, 4, "%s", renderPanic(x)) // 4 == critical
-				c.outCode = 500
-			}
-		}()
-
-		next.ServeHTTP(w, r)
-	})
-}
-
-func renderPanic(x interface{}) string {
-	buf := make([]byte, 16<<10) // 16 KB should be plenty
-	buf = buf[:runtime.Stack(buf, false)]
-
-	// Remove the first few stack frames:
-	//   this func
-	//   the recover closure in the caller
-	// That will root the stack trace at the site of the panic.
-	const (
-		skipStart  = "internal.renderPanic"
-		skipFrames = 2
-	)
-	start := bytes.Index(buf, []byte(skipStart))
-	p := start
-	for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
-		p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
-		if p < 0 {
-			break
-		}
-	}
-	if p >= 0 {
-		// buf[start:p+1] is the block to remove.
-		// Copy buf[p+1:] over buf[start:] and shrink buf.
-		copy(buf[start:], buf[p+1:])
-		buf = buf[:len(buf)-(p+1-start)]
-	}
-
-	// Add panic heading.
-	head := fmt.Sprintf("panic: %v\n\n", x)
-	if len(head) > len(buf) {
-		// Extremely unlikely to happen.
-		return head
-	}
-	copy(buf[len(head):], buf)
-	copy(buf, head)
-
-	return string(buf)
-}
-
-// aeContext represents the aeContext of an in-flight HTTP request.
-// It implements the appengine.Context and http.ResponseWriter interfaces.
-type aeContext struct {
-	req *http.Request
-
-	outCode   int
-	outHeader http.Header
-	outBody   []byte
-
-	pendingLogs struct {
-		sync.Mutex
-		lines   []*logpb.UserAppLogLine
-		flushes int
-	}
-}
-
-var contextKey = "holds a *context"
-
-// jointContext joins two contexts in a superficial way.
-// It takes values and timeouts from a base context, and only values from another context.
-type jointContext struct {
-	base       context.Context
-	valuesOnly context.Context
-}
-
-func (c jointContext) Deadline() (time.Time, bool) {
-	return c.base.Deadline()
-}
-
-func (c jointContext) Done() <-chan struct{} {
-	return c.base.Done()
-}
-
-func (c jointContext) Err() error {
-	return c.base.Err()
-}
-
-func (c jointContext) Value(key interface{}) interface{} {
-	if val := c.base.Value(key); val != nil {
-		return val
-	}
-	return c.valuesOnly.Value(key)
-}
-
-// fromContext returns the App Engine context or nil if ctx is not
-// derived from an App Engine context.
-func fromContext(ctx context.Context) *aeContext {
-	c, _ := ctx.Value(&contextKey).(*aeContext)
-	return c
-}
-
-func withContext(parent context.Context, c *aeContext) context.Context {
-	ctx := context.WithValue(parent, &contextKey, c)
-	if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
-		ctx = withNamespace(ctx, ns)
-	}
-	return ctx
-}
-
-func toContext(c *aeContext) context.Context {
-	return withContext(context.Background(), c)
-}
-
-func IncomingHeaders(ctx context.Context) http.Header {
-	if c := fromContext(ctx); c != nil {
-		return c.req.Header
-	}
-	return nil
-}
-
-func ReqContext(req *http.Request) context.Context {
-	return req.Context()
-}
-
-func WithContext(parent context.Context, req *http.Request) context.Context {
-	return jointContext{
-		base:       parent,
-		valuesOnly: req.Context(),
-	}
-}
-
-// RegisterTestRequest registers the HTTP request req for testing, such that
-// any API calls are sent to the provided URL.
-// It should only be used by aetest package.
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request {
-	ctx := req.Context()
-	ctx = withAPIHostOverride(ctx, apiURL.Hostname())
-	ctx = withAPIPortOverride(ctx, apiURL.Port())
-	ctx = WithAppIDOverride(ctx, appID)
-
-	// use the unregistered request as a placeholder so that withContext can read the headers
-	c := &aeContext{req: req}
-	c.req = req.WithContext(withContext(ctx, c))
-	return c.req
-}
-
-var errTimeout = &CallError{
-	Detail:  "Deadline exceeded",
-	Code:    int32(remotepb.RpcError_CANCELLED),
-	Timeout: true,
-}
-
-func (c *aeContext) Header() http.Header { return c.outHeader }
-
-// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
-// codes do not permit a response body (nor response entity headers such as
-// Content-Length, Content-Type, etc).
-func bodyAllowedForStatus(status int) bool {
-	switch {
-	case status >= 100 && status <= 199:
-		return false
-	case status == 204:
-		return false
-	case status == 304:
-		return false
-	}
-	return true
-}
-
-func (c *aeContext) Write(b []byte) (int, error) {
-	if c.outCode == 0 {
-		c.WriteHeader(http.StatusOK)
-	}
-	if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
-		return 0, http.ErrBodyNotAllowed
-	}
-	c.outBody = append(c.outBody, b...)
-	return len(b), nil
-}
-
-func (c *aeContext) WriteHeader(code int) {
-	if c.outCode != 0 {
-		logf(c, 3, "WriteHeader called multiple times on request.") // error level
-		return
-	}
-	c.outCode = code
-}
-
-func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) {
-	apiURL := apiURL(ctx)
-	hreq := &http.Request{
-		Method: "POST",
-		URL:    apiURL,
-		Header: http.Header{
-			apiEndpointHeader: apiEndpointHeaderValue,
-			apiMethodHeader:   apiMethodHeaderValue,
-			apiContentType:    apiContentTypeValue,
-			apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
-		},
-		Body:          ioutil.NopCloser(bytes.NewReader(body)),
-		ContentLength: int64(len(body)),
-		Host:          apiURL.Host,
-	}
-	c := fromContext(ctx)
-	if c != nil {
-		if info := c.req.Header.Get(dapperHeader); info != "" {
-			hreq.Header.Set(dapperHeader, info)
-		}
-		if info := c.req.Header.Get(traceHeader); info != "" {
-			hreq.Header.Set(traceHeader, info)
-		}
-	}
-
-	tr := apiHTTPClient.Transport.(*http.Transport)
-
-	var timedOut int32 // atomic; set to 1 if timed out
-	t := time.AfterFunc(timeout, func() {
-		atomic.StoreInt32(&timedOut, 1)
-		tr.CancelRequest(hreq)
-	})
-	defer t.Stop()
-	defer func() {
-		// Check if timeout was exceeded.
-		if atomic.LoadInt32(&timedOut) != 0 {
-			err = errTimeout
-		}
-	}()
-
-	hresp, err := apiHTTPClient.Do(hreq)
-	if err != nil {
-		return nil, &CallError{
-			Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
-			Code:   int32(remotepb.RpcError_UNKNOWN),
-		}
-	}
-	defer hresp.Body.Close()
-	hrespBody, err := ioutil.ReadAll(hresp.Body)
-	if hresp.StatusCode != 200 {
-		return nil, &CallError{
-			Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
-			Code:   int32(remotepb.RpcError_UNKNOWN),
-		}
-	}
-	if err != nil {
-		return nil, &CallError{
-			Detail: fmt.Sprintf("service bridge response bad: %v", err),
-			Code:   int32(remotepb.RpcError_UNKNOWN),
-		}
-	}
-	return hrespBody, nil
-}
-
-func Call(ctx context.Context, service, method string, in, out proto.Message) error {
-	if ns := NamespaceFromContext(ctx); ns != "" {
-		if fn, ok := NamespaceMods[service]; ok {
-			fn(in, ns)
-		}
-	}
-
-	if f, ctx, ok := callOverrideFromContext(ctx); ok {
-		return f(ctx, service, method, in, out)
-	}
-
-	// Handle already-done contexts quickly.
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	default:
-	}
-
-	c := fromContext(ctx)
-
-	// Apply transaction modifications if we're in a transaction.
-	if t := transactionFromContext(ctx); t != nil {
-		if t.finished {
-			return errors.New("transaction aeContext has expired")
-		}
-		applyTransaction(in, &t.transaction)
-	}
-
-	// Default RPC timeout is 60s.
-	timeout := 60 * time.Second
-	if deadline, ok := ctx.Deadline(); ok {
-		timeout = deadline.Sub(time.Now())
-	}
-
-	data, err := proto.Marshal(in)
-	if err != nil {
-		return err
-	}
-
-	ticket := ""
-	if c != nil {
-		ticket = c.req.Header.Get(ticketHeader)
-		if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
-			ticket = dri
-		}
-	}
-	req := &remotepb.Request{
-		ServiceName: &service,
-		Method:      &method,
-		Request:     data,
-		RequestId:   &ticket,
-	}
-	hreqBody, err := proto.Marshal(req)
-	if err != nil {
-		return err
-	}
-
-	hrespBody, err := post(ctx, hreqBody, timeout)
-	if err != nil {
-		return err
-	}
-
-	res := &remotepb.Response{}
-	if err := proto.Unmarshal(hrespBody, res); err != nil {
-		return err
-	}
-	if res.RpcError != nil {
-		ce := &CallError{
-			Detail: res.RpcError.GetDetail(),
-			Code:   *res.RpcError.Code,
-		}
-		switch remotepb.RpcError_ErrorCode(ce.Code) {
-		case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
-			ce.Timeout = true
-		}
-		return ce
-	}
-	if res.ApplicationError != nil {
-		return &APIError{
-			Service: *req.ServiceName,
-			Detail:  res.ApplicationError.GetDetail(),
-			Code:    *res.ApplicationError.Code,
-		}
-	}
-	if res.Exception != nil || res.JavaException != nil {
-		// This shouldn't happen, but let's be defensive.
-		return &CallError{
-			Detail: "service bridge returned exception",
-			Code:   int32(remotepb.RpcError_UNKNOWN),
-		}
-	}
-	return proto.Unmarshal(res.Response, out)
-}
-
-func (c *aeContext) Request() *http.Request {
-	return c.req
-}
-
-func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) {
-	// Truncate long log lines.
-	// TODO(dsymonds): Check if this is still necessary.
-	const lim = 8 << 10
-	if len(*ll.Message) > lim {
-		suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
-		ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
-	}
-
-	c.pendingLogs.Lock()
-	c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
-	c.pendingLogs.Unlock()
-}
-
-var logLevelName = map[int64]string{
-	0: "DEBUG",
-	1: "INFO",
-	2: "WARNING",
-	3: "ERROR",
-	4: "CRITICAL",
-}
-
-func logf(c *aeContext, level int64, format string, args ...interface{}) {
-	if c == nil {
-		panic("not an App Engine aeContext")
-	}
-	s := fmt.Sprintf(format, args...)
-	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
-	if logToLogservice() {
-		c.addLogLine(&logpb.UserAppLogLine{
-			TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
-			Level:         &level,
-			Message:       &s,
-		})
-	}
-	// Log to stdout if not deployed
-	if !IsSecondGen() {
-		log.Print(logLevelName[level] + ": " + s)
-	}
-}
-
-// flushLog attempts to flush any pending logs to the appserver.
-// It should not be called concurrently.
-func (c *aeContext) flushLog(force bool) (flushed bool) {
-	c.pendingLogs.Lock()
-	// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
-	n, rem := 0, 30<<20
-	for ; n < len(c.pendingLogs.lines); n++ {
-		ll := c.pendingLogs.lines[n]
-		// Each log line will require about 3 bytes of overhead.
-		nb := proto.Size(ll) + 3
-		if nb > rem {
-			break
-		}
-		rem -= nb
-	}
-	lines := c.pendingLogs.lines[:n]
-	c.pendingLogs.lines = c.pendingLogs.lines[n:]
-	c.pendingLogs.Unlock()
-
-	if len(lines) == 0 && !force {
-		// Nothing to flush.
-		return false
-	}
-
-	rescueLogs := false
-	defer func() {
-		if rescueLogs {
-			c.pendingLogs.Lock()
-			c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
-			c.pendingLogs.Unlock()
-		}
-	}()
-
-	buf, err := proto.Marshal(&logpb.UserAppLogGroup{
-		LogLine: lines,
-	})
-	if err != nil {
-		log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
-		rescueLogs = true
-		return false
-	}
-
-	req := &logpb.FlushRequest{
-		Logs: buf,
-	}
-	res := &basepb.VoidProto{}
-	c.pendingLogs.Lock()
-	c.pendingLogs.flushes++
-	c.pendingLogs.Unlock()
-	if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
-		log.Printf("internal.flushLog: Flush RPC: %v", err)
-		rescueLogs = true
-		return false
-	}
-	return true
-}
-
-const (
-	// Log flushing parameters.
-	flushInterval      = 1 * time.Second
-	forceFlushInterval = 60 * time.Second
-)
-
-func (c *aeContext) logFlusher(stop <-chan int) {
-	lastFlush := time.Now()
-	tick := time.NewTicker(flushInterval)
-	for {
-		select {
-		case <-stop:
-			// Request finished.
-			tick.Stop()
-			return
-		case <-tick.C:
-			force := time.Now().Sub(lastFlush) > forceFlushInterval
-			if c.flushLog(force) {
-				lastFlush = time.Now()
-			}
-		}
-	}
-}
-
-func ContextForTesting(req *http.Request) context.Context {
-	return toContext(&aeContext{req: req})
-}
-
-func logToLogservice() bool {
-	// TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json
-	// where $LOG_DIR is /var/log in prod and some tmpdir in dev
-	return os.Getenv("LOG_TO_LOGSERVICE") != "0"
-}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
deleted file mode 100644
index 87c33c79..00000000
--- a/vendor/google.golang.org/appengine/internal/api_classic.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-//go:build appengine
-// +build appengine
-
-package internal
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net/http"
-	"time"
-
-	"appengine"
-	"appengine_internal"
-	basepb "appengine_internal/base"
-
-	"github.com/golang/protobuf/proto"
-)
-
-var contextKey = "holds an appengine.Context"
-
-// fromContext returns the App Engine context or nil if ctx is not
-// derived from an App Engine context.
-func fromContext(ctx context.Context) appengine.Context {
-	c, _ := ctx.Value(&contextKey).(appengine.Context)
-	return c
-}
-
-// This is only for classic App Engine adapters.
-func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) {
-	c := fromContext(ctx)
-	if c == nil {
-		return nil, errNotAppEngineContext
-	}
-	return c, nil
-}
-
-func withContext(parent context.Context, c appengine.Context) context.Context {
-	ctx := context.WithValue(parent, &contextKey, c)
-
-	s := &basepb.StringProto{}
-	c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
-	if ns := s.GetValue(); ns != "" {
-		ctx = NamespacedContext(ctx, ns)
-	}
-
-	return ctx
-}
-
-func IncomingHeaders(ctx context.Context) http.Header {
-	if c := fromContext(ctx); c != nil {
-		if req, ok := c.Request().(*http.Request); ok {
-			return req.Header
-		}
-	}
-	return nil
-}
-
-func ReqContext(req *http.Request) context.Context {
-	return WithContext(context.Background(), req)
-}
-
-func WithContext(parent context.Context, req *http.Request) context.Context {
-	c := appengine.NewContext(req)
-	return withContext(parent, c)
-}
-
-type testingContext struct {
-	appengine.Context
-
-	req *http.Request
-}
-
-func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
-func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
-	if service == "__go__" && method == "GetNamespace" {
-		return nil
-	}
-	return fmt.Errorf("testingContext: unsupported Call")
-}
-func (t *testingContext) Request() interface{} { return t.req }
-
-func ContextForTesting(req *http.Request) context.Context {
-	return withContext(context.Background(), &testingContext{req: req})
-}
-
-func Call(ctx context.Context, service, method string, in, out proto.Message) error {
-	if ns := NamespaceFromContext(ctx); ns != "" {
-		if fn, ok := NamespaceMods[service]; ok {
-			fn(in, ns)
-		}
-	}
-
-	if f, ctx, ok := callOverrideFromContext(ctx); ok {
-		return f(ctx, service, method, in, out)
-	}
-
-	// Handle already-done contexts quickly.
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	default:
-	}
-
-	c := fromContext(ctx)
-	if c == nil {
-		// Give a good error message rather than a panic lower down.
-		return errNotAppEngineContext
-	}
-
-	// Apply transaction modifications if we're in a transaction.
-	if t := transactionFromContext(ctx); t != nil {
-		if t.finished {
-			return errors.New("transaction context has expired")
-		}
-		applyTransaction(in, &t.transaction)
-	}
-
-	var opts *appengine_internal.CallOptions
-	if d, ok := ctx.Deadline(); ok {
-		opts = &appengine_internal.CallOptions{
-			Timeout: d.Sub(time.Now()),
-		}
-	}
-
-	err := c.Call(service, method, in, out, opts)
-	switch v := err.(type) {
-	case *appengine_internal.APIError:
-		return &APIError{
-			Service: v.Service,
-			Detail:  v.Detail,
-			Code:    v.Code,
-		}
-	case *appengine_internal.CallError:
-		return &CallError{
-			Detail:  v.Detail,
-			Code:    v.Code,
-			Timeout: v.Timeout,
-		}
-	}
-	return err
-}
-
-func Middleware(next http.Handler) http.Handler {
-	panic("Middleware called; this should be impossible")
-}
-
-func logf(c appengine.Context, level int64, format string, args ...interface{}) {
-	var fn func(format string, args ...interface{})
-	switch level {
-	case 0:
-		fn = c.Debugf
-	case 1:
-		fn = c.Infof
-	case 2:
-		fn = c.Warningf
-	case 3:
-		fn = c.Errorf
-	case 4:
-		fn = c.Criticalf
-	default:
-		// This shouldn't happen.
-		fn = c.Criticalf
-	}
-	fn(format, args...)
-}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
deleted file mode 100644
index 5b95c13d..00000000
--- a/vendor/google.golang.org/appengine/internal/api_common.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
-	"context"
-	"errors"
-	"os"
-
-	"github.com/golang/protobuf/proto"
-)
-
-type ctxKey string
-
-func (c ctxKey) String() string {
-	return "appengine context key: " + string(c)
-}
-
-var errNotAppEngineContext = errors.New("not an App Engine context")
-
-type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error
-
-var callOverrideKey = "holds []CallOverrideFunc"
-
-func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context {
-	// We avoid appending to any existing call override
-	// so we don't risk overwriting a popped stack below.
-	var cofs []CallOverrideFunc
-	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
-		cofs = append(cofs, uf...)
-	}
-	cofs = append(cofs, f)
-	return context.WithValue(ctx, &callOverrideKey, cofs)
-}
-
-func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) {
-	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
-	if len(cofs) == 0 {
-		return nil, nil, false
-	}
-	// We found a list of overrides; grab the last, and reconstitute a
-	// context that will hide it.
-	f := cofs[len(cofs)-1]
-	ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
-	return f, ctx, true
-}
-
-type logOverrideFunc func(level int64, format string, args ...interface{})
-
-var logOverrideKey = "holds a logOverrideFunc"
-
-func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context {
-	return context.WithValue(ctx, &logOverrideKey, f)
-}
-
-var appIDOverrideKey = "holds a string, being the full app ID"
-
-func WithAppIDOverride(ctx context.Context, appID string) context.Context {
-	return context.WithValue(ctx, &appIDOverrideKey, appID)
-}
-
-var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST")
-
-func withAPIHostOverride(ctx context.Context, apiHost string) context.Context {
-	return context.WithValue(ctx, apiHostOverrideKey, apiHost)
-}
-
-var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT")
-
-func withAPIPortOverride(ctx context.Context, apiPort string) context.Context {
-	return context.WithValue(ctx, apiPortOverrideKey, apiPort)
-}
-
-var namespaceKey = "holds the namespace string"
-
-func withNamespace(ctx context.Context, ns string) context.Context {
-	return context.WithValue(ctx, &namespaceKey, ns)
-}
-
-func NamespaceFromContext(ctx context.Context) string {
-	// If there's no namespace, return the empty string.
-	ns, _ := ctx.Value(&namespaceKey).(string)
-	return ns
-}
-
-// FullyQualifiedAppID returns the fully-qualified application ID.
-// This may contain a partition prefix (e.g. "s~" for High Replication apps),
-// or a domain prefix (e.g. "example.com:").
-func FullyQualifiedAppID(ctx context.Context) string {
-	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
-		return id
-	}
-	return fullyQualifiedAppID(ctx)
-}
-
-func Logf(ctx context.Context, level int64, format string, args ...interface{}) {
-	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
-		f(level, format, args...)
-		return
-	}
-	c := fromContext(ctx)
-	if c == nil {
-		panic(errNotAppEngineContext)
-	}
-	logf(c, level, format, args...)
-}
-
-// NamespacedContext wraps a Context to support namespaces.
-func NamespacedContext(ctx context.Context, namespace string) context.Context {
-	return withNamespace(ctx, namespace)
-}
-
-// SetTestEnv sets the env variables for testing background ticket in Flex.
-func SetTestEnv() func() {
-	var environ = []struct {
-		key, value string
-	}{
-		{"GAE_LONG_APP_ID", "my-app-id"},
-		{"GAE_MINOR_VERSION", "067924799508853122"},
-		{"GAE_MODULE_INSTANCE", "0"},
-		{"GAE_MODULE_NAME", "default"},
-		{"GAE_MODULE_VERSION", "20150612t184001"},
-	}
-
-	for _, v := range environ {
-		old := os.Getenv(v.key)
-		os.Setenv(v.key, v.value)
-		v.value = old
-	}
-	return func() { // Restore old environment after the test completes.
-		for _, v := range environ {
-			if v.value == "" {
-				os.Unsetenv(v.key)
-				continue
-			}
-			os.Setenv(v.key, v.value)
-		}
-	}
-}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
deleted file mode 100644
index 11df8c07..00000000
--- a/vendor/google.golang.org/appengine/internal/app_id.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
-	"strings"
-)
-
-func parseFullAppID(appid string) (partition, domain, displayID string) {
-	if i := strings.Index(appid, "~"); i != -1 {
-		partition, appid = appid[:i], appid[i+1:]
-	}
-	if i := strings.Index(appid, ":"); i != -1 {
-		domain, appid = appid[:i], appid[i+1:]
-	}
-	return partition, domain, appid
-}
-
-// appID returns "appid" or "domain.com:appid".
-func appID(fullAppID string) string {
-	_, dom, dis := parseFullAppID(fullAppID)
-	if dom != "" {
-		return dom + ":" + dis
-	}
-	return dis
-}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
deleted file mode 100644
index db4777e6..00000000
--- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/base/api_base.proto
-
-package base
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type StringProto struct {
-	Value                *string  `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *StringProto) Reset()         { *m = StringProto{} }
-func (m *StringProto) String() string { return proto.CompactTextString(m) }
-func (*StringProto) ProtoMessage()    {}
-func (*StringProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
-}
-func (m *StringProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_StringProto.Unmarshal(m, b)
-}
-func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
-}
-func (dst *StringProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StringProto.Merge(dst, src)
-}
-func (m *StringProto) XXX_Size() int {
-	return xxx_messageInfo_StringProto.Size(m)
-}
-func (m *StringProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_StringProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StringProto proto.InternalMessageInfo
-
-func (m *StringProto) GetValue() string {
-	if m != nil && m.Value != nil {
-		return *m.Value
-	}
-	return ""
-}
-
-type Integer32Proto struct {
-	Value                *int32   `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Integer32Proto) Reset()         { *m = Integer32Proto{} }
-func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer32Proto) ProtoMessage()    {}
-func (*Integer32Proto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
-}
-func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
-}
-func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
-}
-func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Integer32Proto.Merge(dst, src)
-}
-func (m *Integer32Proto) XXX_Size() int {
-	return xxx_messageInfo_Integer32Proto.Size(m)
-}
-func (m *Integer32Proto) XXX_DiscardUnknown() {
-	xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
-
-func (m *Integer32Proto) GetValue() int32 {
-	if m != nil && m.Value != nil {
-		return *m.Value
-	}
-	return 0
-}
-
-type Integer64Proto struct {
-	Value                *int64   `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Integer64Proto) Reset()         { *m = Integer64Proto{} }
-func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer64Proto) ProtoMessage()    {}
-func (*Integer64Proto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
-}
-func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
-}
-func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
-}
-func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Integer64Proto.Merge(dst, src)
-}
-func (m *Integer64Proto) XXX_Size() int {
-	return xxx_messageInfo_Integer64Proto.Size(m)
-}
-func (m *Integer64Proto) XXX_DiscardUnknown() {
-	xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
-
-func (m *Integer64Proto) GetValue() int64 {
-	if m != nil && m.Value != nil {
-		return *m.Value
-	}
-	return 0
-}
-
-type BoolProto struct {
-	Value                *bool    `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *BoolProto) Reset()         { *m = BoolProto{} }
-func (m *BoolProto) String() string { return proto.CompactTextString(m) }
-func (*BoolProto) ProtoMessage()    {}
-func (*BoolProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
-}
-func (m *BoolProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_BoolProto.Unmarshal(m, b)
-}
-func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
-}
-func (dst *BoolProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BoolProto.Merge(dst, src)
-}
-func (m *BoolProto) XXX_Size() int {
-	return xxx_messageInfo_BoolProto.Size(m)
-}
-func (m *BoolProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_BoolProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BoolProto proto.InternalMessageInfo
-
-func (m *BoolProto) GetValue() bool {
-	if m != nil && m.Value != nil {
-		return *m.Value
-	}
-	return false
-}
-
-type DoubleProto struct {
-	Value                *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DoubleProto) Reset()         { *m = DoubleProto{} }
-func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
-func (*DoubleProto) ProtoMessage()    {}
-func (*DoubleProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
-}
-func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
-}
-func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
-}
-func (dst *DoubleProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DoubleProto.Merge(dst, src)
-}
-func (m *DoubleProto) XXX_Size() int {
-	return xxx_messageInfo_DoubleProto.Size(m)
-}
-func (m *DoubleProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_DoubleProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
-
-func (m *DoubleProto) GetValue() float64 {
-	if m != nil && m.Value != nil {
-		return *m.Value
-	}
-	return 0
-}
-
-type BytesProto struct {
-	Value                []byte   `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *BytesProto) Reset()         { *m = BytesProto{} }
-func (m *BytesProto) String() string { return proto.CompactTextString(m) }
-func (*BytesProto) ProtoMessage()    {}
-func (*BytesProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
-}
-func (m *BytesProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_BytesProto.Unmarshal(m, b)
-}
-func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
-}
-func (dst *BytesProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BytesProto.Merge(dst, src)
-}
-func (m *BytesProto) XXX_Size() int {
-	return xxx_messageInfo_BytesProto.Size(m)
-}
-func (m *BytesProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_BytesProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BytesProto proto.InternalMessageInfo
-
-func (m *BytesProto) GetValue() []byte {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-type VoidProto struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *VoidProto) Reset()         { *m = VoidProto{} }
-func (m *VoidProto) String() string { return proto.CompactTextString(m) }
-func (*VoidProto) ProtoMessage()    {}
-func (*VoidProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
-}
-func (m *VoidProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_VoidProto.Unmarshal(m, b)
-}
-func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
-}
-func (dst *VoidProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_VoidProto.Merge(dst, src)
-}
-func (m *VoidProto) XXX_Size() int {
-	return xxx_messageInfo_VoidProto.Size(m)
-}
-func (m *VoidProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_VoidProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VoidProto proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
-	proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
-	proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
-	proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
-	proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
-	proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
-	proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
-}
-
-func init() {
-	proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
-}
-
-var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
-	// 199 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
-	0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
-	0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
-	0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
-	0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
-	0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
-	0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
-	0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
-	0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
-	0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
-	0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
-	0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
-	0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
deleted file mode 100644
index 56cd7a3c..00000000
--- a/vendor/google.golang.org/appengine/internal/base/api_base.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-// Built-in base types for API calls. Primarily useful as return types.
-
-syntax = "proto2";
-option go_package = "base";
-
-package appengine.base;
-
-message StringProto {
-  required string value = 1;
-}
-
-message Integer32Proto {
-  required int32 value = 1;
-}
-
-message Integer64Proto {
-  required int64 value = 1;
-}
-
-message BoolProto {
-  required bool value = 1;
-}
-
-message DoubleProto {
-  required double value = 1;
-}
-
-message BytesProto {
-  required bytes value = 1 [ctype=CORD];
-}
-
-message VoidProto {
-}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
deleted file mode 100644
index 2fb74828..00000000
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
+++ /dev/null
@@ -1,4367 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
-
-package datastore
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Property_Meaning int32
-
-const (
-	Property_NO_MEANING       Property_Meaning = 0
-	Property_BLOB             Property_Meaning = 14
-	Property_TEXT             Property_Meaning = 15
-	Property_BYTESTRING       Property_Meaning = 16
-	Property_ATOM_CATEGORY    Property_Meaning = 1
-	Property_ATOM_LINK        Property_Meaning = 2
-	Property_ATOM_TITLE       Property_Meaning = 3
-	Property_ATOM_CONTENT     Property_Meaning = 4
-	Property_ATOM_SUMMARY     Property_Meaning = 5
-	Property_ATOM_AUTHOR      Property_Meaning = 6
-	Property_GD_WHEN          Property_Meaning = 7
-	Property_GD_EMAIL         Property_Meaning = 8
-	Property_GEORSS_POINT     Property_Meaning = 9
-	Property_GD_IM            Property_Meaning = 10
-	Property_GD_PHONENUMBER   Property_Meaning = 11
-	Property_GD_POSTALADDRESS Property_Meaning = 12
-	Property_GD_RATING        Property_Meaning = 13
-	Property_BLOBKEY          Property_Meaning = 17
-	Property_ENTITY_PROTO     Property_Meaning = 19
-	Property_INDEX_VALUE      Property_Meaning = 18
-)
-
-var Property_Meaning_name = map[int32]string{
-	0:  "NO_MEANING",
-	14: "BLOB",
-	15: "TEXT",
-	16: "BYTESTRING",
-	1:  "ATOM_CATEGORY",
-	2:  "ATOM_LINK",
-	3:  "ATOM_TITLE",
-	4:  "ATOM_CONTENT",
-	5:  "ATOM_SUMMARY",
-	6:  "ATOM_AUTHOR",
-	7:  "GD_WHEN",
-	8:  "GD_EMAIL",
-	9:  "GEORSS_POINT",
-	10: "GD_IM",
-	11: "GD_PHONENUMBER",
-	12: "GD_POSTALADDRESS",
-	13: "GD_RATING",
-	17: "BLOBKEY",
-	19: "ENTITY_PROTO",
-	18: "INDEX_VALUE",
-}
-var Property_Meaning_value = map[string]int32{
-	"NO_MEANING":       0,
-	"BLOB":             14,
-	"TEXT":             15,
-	"BYTESTRING":       16,
-	"ATOM_CATEGORY":    1,
-	"ATOM_LINK":        2,
-	"ATOM_TITLE":       3,
-	"ATOM_CONTENT":     4,
-	"ATOM_SUMMARY":     5,
-	"ATOM_AUTHOR":      6,
-	"GD_WHEN":          7,
-	"GD_EMAIL":         8,
-	"GEORSS_POINT":     9,
-	"GD_IM":            10,
-	"GD_PHONENUMBER":   11,
-	"GD_POSTALADDRESS": 12,
-	"GD_RATING":        13,
-	"BLOBKEY":          17,
-	"ENTITY_PROTO":     19,
-	"INDEX_VALUE":      18,
-}
-
-func (x Property_Meaning) Enum() *Property_Meaning {
-	p := new(Property_Meaning)
-	*p = x
-	return p
-}
-func (x Property_Meaning) String() string {
-	return proto.EnumName(Property_Meaning_name, int32(x))
-}
-func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
-	if err != nil {
-		return err
-	}
-	*x = Property_Meaning(value)
-	return nil
-}
-func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
-}
-
-type Property_FtsTokenizationOption int32
-
-const (
-	Property_HTML Property_FtsTokenizationOption = 1
-	Property_ATOM Property_FtsTokenizationOption = 2
-)
-
-var Property_FtsTokenizationOption_name = map[int32]string{
-	1: "HTML",
-	2: "ATOM",
-}
-var Property_FtsTokenizationOption_value = map[string]int32{
-	"HTML": 1,
-	"ATOM": 2,
-}
-
-func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
-	p := new(Property_FtsTokenizationOption)
-	*p = x
-	return p
-}
-func (x Property_FtsTokenizationOption) String() string {
-	return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
-}
-func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
-	if err != nil {
-		return err
-	}
-	*x = Property_FtsTokenizationOption(value)
-	return nil
-}
-func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
-}
-
-type EntityProto_Kind int32
-
-const (
-	EntityProto_GD_CONTACT EntityProto_Kind = 1
-	EntityProto_GD_EVENT   EntityProto_Kind = 2
-	EntityProto_GD_MESSAGE EntityProto_Kind = 3
-)
-
-var EntityProto_Kind_name = map[int32]string{
-	1: "GD_CONTACT",
-	2: "GD_EVENT",
-	3: "GD_MESSAGE",
-}
-var EntityProto_Kind_value = map[string]int32{
-	"GD_CONTACT": 1,
-	"GD_EVENT":   2,
-	"GD_MESSAGE": 3,
-}
-
-func (x EntityProto_Kind) Enum() *EntityProto_Kind {
-	p := new(EntityProto_Kind)
-	*p = x
-	return p
-}
-func (x EntityProto_Kind) String() string {
-	return proto.EnumName(EntityProto_Kind_name, int32(x))
-}
-func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
-	if err != nil {
-		return err
-	}
-	*x = EntityProto_Kind(value)
-	return nil
-}
-func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
-}
-
-type Index_Property_Direction int32
-
-const (
-	Index_Property_ASCENDING  Index_Property_Direction = 1
-	Index_Property_DESCENDING Index_Property_Direction = 2
-)
-
-var Index_Property_Direction_name = map[int32]string{
-	1: "ASCENDING",
-	2: "DESCENDING",
-}
-var Index_Property_Direction_value = map[string]int32{
-	"ASCENDING":  1,
-	"DESCENDING": 2,
-}
-
-func (x Index_Property_Direction) Enum() *Index_Property_Direction {
-	p := new(Index_Property_Direction)
-	*p = x
-	return p
-}
-func (x Index_Property_Direction) String() string {
-	return proto.EnumName(Index_Property_Direction_name, int32(x))
-}
-func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
-	if err != nil {
-		return err
-	}
-	*x = Index_Property_Direction(value)
-	return nil
-}
-func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
-}
-
-type CompositeIndex_State int32
-
-const (
-	CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
-	CompositeIndex_READ_WRITE CompositeIndex_State = 2
-	CompositeIndex_DELETED    CompositeIndex_State = 3
-	CompositeIndex_ERROR      CompositeIndex_State = 4
-)
-
-var CompositeIndex_State_name = map[int32]string{
-	1: "WRITE_ONLY",
-	2: "READ_WRITE",
-	3: "DELETED",
-	4: "ERROR",
-}
-var CompositeIndex_State_value = map[string]int32{
-	"WRITE_ONLY": 1,
-	"READ_WRITE": 2,
-	"DELETED":    3,
-	"ERROR":      4,
-}
-
-func (x CompositeIndex_State) Enum() *CompositeIndex_State {
-	p := new(CompositeIndex_State)
-	*p = x
-	return p
-}
-func (x CompositeIndex_State) String() string {
-	return proto.EnumName(CompositeIndex_State_name, int32(x))
-}
-func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
-	if err != nil {
-		return err
-	}
-	*x = CompositeIndex_State(value)
-	return nil
-}
-func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
-}
-
-type Snapshot_Status int32
-
-const (
-	Snapshot_INACTIVE Snapshot_Status = 0
-	Snapshot_ACTIVE   Snapshot_Status = 1
-)
-
-var Snapshot_Status_name = map[int32]string{
-	0: "INACTIVE",
-	1: "ACTIVE",
-}
-var Snapshot_Status_value = map[string]int32{
-	"INACTIVE": 0,
-	"ACTIVE":   1,
-}
-
-func (x Snapshot_Status) Enum() *Snapshot_Status {
-	p := new(Snapshot_Status)
-	*p = x
-	return p
-}
-func (x Snapshot_Status) String() string {
-	return proto.EnumName(Snapshot_Status_name, int32(x))
-}
-func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
-	if err != nil {
-		return err
-	}
-	*x = Snapshot_Status(value)
-	return nil
-}
-func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
-}
-
-type Query_Hint int32
-
-const (
-	Query_ORDER_FIRST    Query_Hint = 1
-	Query_ANCESTOR_FIRST Query_Hint = 2
-	Query_FILTER_FIRST   Query_Hint = 3
-)
-
-var Query_Hint_name = map[int32]string{
-	1: "ORDER_FIRST",
-	2: "ANCESTOR_FIRST",
-	3: "FILTER_FIRST",
-}
-var Query_Hint_value = map[string]int32{
-	"ORDER_FIRST":    1,
-	"ANCESTOR_FIRST": 2,
-	"FILTER_FIRST":   3,
-}
-
-func (x Query_Hint) Enum() *Query_Hint {
-	p := new(Query_Hint)
-	*p = x
-	return p
-}
-func (x Query_Hint) String() string {
-	return proto.EnumName(Query_Hint_name, int32(x))
-}
-func (x *Query_Hint) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
-	if err != nil {
-		return err
-	}
-	*x = Query_Hint(value)
-	return nil
-}
-func (Query_Hint) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
-}
-
-type Query_Filter_Operator int32
-
-const (
-	Query_Filter_LESS_THAN             Query_Filter_Operator = 1
-	Query_Filter_LESS_THAN_OR_EQUAL    Query_Filter_Operator = 2
-	Query_Filter_GREATER_THAN          Query_Filter_Operator = 3
-	Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
-	Query_Filter_EQUAL                 Query_Filter_Operator = 5
-	Query_Filter_IN                    Query_Filter_Operator = 6
-	Query_Filter_EXISTS                Query_Filter_Operator = 7
-)
-
-var Query_Filter_Operator_name = map[int32]string{
-	1: "LESS_THAN",
-	2: "LESS_THAN_OR_EQUAL",
-	3: "GREATER_THAN",
-	4: "GREATER_THAN_OR_EQUAL",
-	5: "EQUAL",
-	6: "IN",
-	7: "EXISTS",
-}
-var Query_Filter_Operator_value = map[string]int32{
-	"LESS_THAN":             1,
-	"LESS_THAN_OR_EQUAL":    2,
-	"GREATER_THAN":          3,
-	"GREATER_THAN_OR_EQUAL": 4,
-	"EQUAL":                 5,
-	"IN":                    6,
-	"EXISTS":                7,
-}
-
-func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
-	p := new(Query_Filter_Operator)
-	*p = x
-	return p
-}
-func (x Query_Filter_Operator) String() string {
-	return proto.EnumName(Query_Filter_Operator_name, int32(x))
-}
-func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
-	if err != nil {
-		return err
-	}
-	*x = Query_Filter_Operator(value)
-	return nil
-}
-func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
-}
-
-type Query_Order_Direction int32
-
-const (
-	Query_Order_ASCENDING  Query_Order_Direction = 1
-	Query_Order_DESCENDING Query_Order_Direction = 2
-)
-
-var Query_Order_Direction_name = map[int32]string{
-	1: "ASCENDING",
-	2: "DESCENDING",
-}
-var Query_Order_Direction_value = map[string]int32{
-	"ASCENDING":  1,
-	"DESCENDING": 2,
-}
-
-func (x Query_Order_Direction) Enum() *Query_Order_Direction {
-	p := new(Query_Order_Direction)
-	*p = x
-	return p
-}
-func (x Query_Order_Direction) String() string {
-	return proto.EnumName(Query_Order_Direction_name, int32(x))
-}
-func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
-	if err != nil {
-		return err
-	}
-	*x = Query_Order_Direction(value)
-	return nil
-}
-func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
-}
-
-type Error_ErrorCode int32
-
-const (
-	Error_BAD_REQUEST                  Error_ErrorCode = 1
-	Error_CONCURRENT_TRANSACTION       Error_ErrorCode = 2
-	Error_INTERNAL_ERROR               Error_ErrorCode = 3
-	Error_NEED_INDEX                   Error_ErrorCode = 4
-	Error_TIMEOUT                      Error_ErrorCode = 5
-	Error_PERMISSION_DENIED            Error_ErrorCode = 6
-	Error_BIGTABLE_ERROR               Error_ErrorCode = 7
-	Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
-	Error_CAPABILITY_DISABLED          Error_ErrorCode = 9
-	Error_TRY_ALTERNATE_BACKEND        Error_ErrorCode = 10
-	Error_SAFE_TIME_TOO_OLD            Error_ErrorCode = 11
-)
-
-var Error_ErrorCode_name = map[int32]string{
-	1:  "BAD_REQUEST",
-	2:  "CONCURRENT_TRANSACTION",
-	3:  "INTERNAL_ERROR",
-	4:  "NEED_INDEX",
-	5:  "TIMEOUT",
-	6:  "PERMISSION_DENIED",
-	7:  "BIGTABLE_ERROR",
-	8:  "COMMITTED_BUT_STILL_APPLYING",
-	9:  "CAPABILITY_DISABLED",
-	10: "TRY_ALTERNATE_BACKEND",
-	11: "SAFE_TIME_TOO_OLD",
-}
-var Error_ErrorCode_value = map[string]int32{
-	"BAD_REQUEST":                  1,
-	"CONCURRENT_TRANSACTION":       2,
-	"INTERNAL_ERROR":               3,
-	"NEED_INDEX":                   4,
-	"TIMEOUT":                      5,
-	"PERMISSION_DENIED":            6,
-	"BIGTABLE_ERROR":               7,
-	"COMMITTED_BUT_STILL_APPLYING": 8,
-	"CAPABILITY_DISABLED":          9,
-	"TRY_ALTERNATE_BACKEND":        10,
-	"SAFE_TIME_TOO_OLD":            11,
-}
-
-func (x Error_ErrorCode) Enum() *Error_ErrorCode {
-	p := new(Error_ErrorCode)
-	*p = x
-	return p
-}
-func (x Error_ErrorCode) String() string {
-	return proto.EnumName(Error_ErrorCode_name, int32(x))
-}
-func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
-	if err != nil {
-		return err
-	}
-	*x = Error_ErrorCode(value)
-	return nil
-}
-func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
-}
-
-type PutRequest_AutoIdPolicy int32
-
-const (
-	PutRequest_CURRENT    PutRequest_AutoIdPolicy = 0
-	PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
-)
-
-var PutRequest_AutoIdPolicy_name = map[int32]string{
-	0: "CURRENT",
-	1: "SEQUENTIAL",
-}
-var PutRequest_AutoIdPolicy_value = map[string]int32{
-	"CURRENT":    0,
-	"SEQUENTIAL": 1,
-}
-
-func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
-	p := new(PutRequest_AutoIdPolicy)
-	*p = x
-	return p
-}
-func (x PutRequest_AutoIdPolicy) String() string {
-	return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
-}
-func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
-	if err != nil {
-		return err
-	}
-	*x = PutRequest_AutoIdPolicy(value)
-	return nil
-}
-func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
-}
-
-type BeginTransactionRequest_TransactionMode int32
-
-const (
-	BeginTransactionRequest_UNKNOWN    BeginTransactionRequest_TransactionMode = 0
-	BeginTransactionRequest_READ_ONLY  BeginTransactionRequest_TransactionMode = 1
-	BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2
-)
-
-var BeginTransactionRequest_TransactionMode_name = map[int32]string{
-	0: "UNKNOWN",
-	1: "READ_ONLY",
-	2: "READ_WRITE",
-}
-var BeginTransactionRequest_TransactionMode_value = map[string]int32{
-	"UNKNOWN":    0,
-	"READ_ONLY":  1,
-	"READ_WRITE": 2,
-}
-
-func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode {
-	p := new(BeginTransactionRequest_TransactionMode)
-	*p = x
-	return p
-}
-func (x BeginTransactionRequest_TransactionMode) String() string {
-	return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x))
-}
-func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode")
-	if err != nil {
-		return err
-	}
-	*x = BeginTransactionRequest_TransactionMode(value)
-	return nil
-}
-func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
-}
-
-type Action struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Action) Reset()         { *m = Action{} }
-func (m *Action) String() string { return proto.CompactTextString(m) }
-func (*Action) ProtoMessage()    {}
-func (*Action) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
-}
-func (m *Action) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Action.Unmarshal(m, b)
-}
-func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Action.Marshal(b, m, deterministic)
-}
-func (dst *Action) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Action.Merge(dst, src)
-}
-func (m *Action) XXX_Size() int {
-	return xxx_messageInfo_Action.Size(m)
-}
-func (m *Action) XXX_DiscardUnknown() {
-	xxx_messageInfo_Action.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Action proto.InternalMessageInfo
-
-type PropertyValue struct {
-	Int64Value           *int64                        `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
-	BooleanValue         *bool                         `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
-	StringValue          *string                       `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
-	DoubleValue          *float64                      `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
-	Pointvalue           *PropertyValue_PointValue     `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
-	Uservalue            *PropertyValue_UserValue      `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
-	Referencevalue       *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
-	XXX_unrecognized     []byte                        `json:"-"`
-	XXX_sizecache        int32                         `json:"-"`
-}
-
-func (m *PropertyValue) Reset()         { *m = PropertyValue{} }
-func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue) ProtoMessage()    {}
-func (*PropertyValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
-}
-func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
-}
-func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PropertyValue.Merge(dst, src)
-}
-func (m *PropertyValue) XXX_Size() int {
-	return xxx_messageInfo_PropertyValue.Size(m)
-}
-func (m *PropertyValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_PropertyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
-
-func (m *PropertyValue) GetInt64Value() int64 {
-	if m != nil && m.Int64Value != nil {
-		return *m.Int64Value
-	}
-	return 0
-}
-
-func (m *PropertyValue) GetBooleanValue() bool {
-	if m != nil && m.BooleanValue != nil {
-		return *m.BooleanValue
-	}
-	return false
-}
-
-func (m *PropertyValue) GetStringValue() string {
-	if m != nil && m.StringValue != nil {
-		return *m.StringValue
-	}
-	return ""
-}
-
-func (m *PropertyValue) GetDoubleValue() float64 {
-	if m != nil && m.DoubleValue != nil {
-		return *m.DoubleValue
-	}
-	return 0
-}
-
-func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
-	if m != nil {
-		return m.Pointvalue
-	}
-	return nil
-}
-
-func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
-	if m != nil {
-		return m.Uservalue
-	}
-	return nil
-}
-
-func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
-	if m != nil {
-		return m.Referencevalue
-	}
-	return nil
-}
-
-type PropertyValue_PointValue struct {
-	X                    *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
-	Y                    *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PropertyValue_PointValue) Reset()         { *m = PropertyValue_PointValue{} }
-func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_PointValue) ProtoMessage()    {}
-func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
-}
-func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
-}
-func (m *PropertyValue_PointValue) XXX_Size() int {
-	return xxx_messageInfo_PropertyValue_PointValue.Size(m)
-}
-func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
-
-func (m *PropertyValue_PointValue) GetX() float64 {
-	if m != nil && m.X != nil {
-		return *m.X
-	}
-	return 0
-}
-
-func (m *PropertyValue_PointValue) GetY() float64 {
-	if m != nil && m.Y != nil {
-		return *m.Y
-	}
-	return 0
-}
-
-type PropertyValue_UserValue struct {
-	Email                *string  `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
-	AuthDomain           *string  `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
-	Nickname             *string  `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
-	FederatedIdentity    *string  `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
-	FederatedProvider    *string  `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PropertyValue_UserValue) Reset()         { *m = PropertyValue_UserValue{} }
-func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_UserValue) ProtoMessage()    {}
-func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
-}
-func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
-}
-func (m *PropertyValue_UserValue) XXX_Size() int {
-	return xxx_messageInfo_PropertyValue_UserValue.Size(m)
-}
-func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
-
-func (m *PropertyValue_UserValue) GetEmail() string {
-	if m != nil && m.Email != nil {
-		return *m.Email
-	}
-	return ""
-}
-
-func (m *PropertyValue_UserValue) GetAuthDomain() string {
-	if m != nil && m.AuthDomain != nil {
-		return *m.AuthDomain
-	}
-	return ""
-}
-
-func (m *PropertyValue_UserValue) GetNickname() string {
-	if m != nil && m.Nickname != nil {
-		return *m.Nickname
-	}
-	return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
-	if m != nil && m.FederatedIdentity != nil {
-		return *m.FederatedIdentity
-	}
-	return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedProvider() string {
-	if m != nil && m.FederatedProvider != nil {
-		return *m.FederatedProvider
-	}
-	return ""
-}
-
-type PropertyValue_ReferenceValue struct {
-	App                  *string                                     `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
-	NameSpace            *string                                     `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
-	Pathelement          []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                                    `json:"-"`
-	XXX_unrecognized     []byte                                      `json:"-"`
-	XXX_sizecache        int32                                       `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue) Reset()         { *m = PropertyValue_ReferenceValue{} }
-func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue) ProtoMessage()    {}
-func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
-}
-func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
-}
-func (m *PropertyValue_ReferenceValue) XXX_Size() int {
-	return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
-}
-func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
-
-func (m *PropertyValue_ReferenceValue) GetApp() string {
-	if m != nil && m.App != nil {
-		return *m.App
-	}
-	return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
-	if m != nil && m.NameSpace != nil {
-		return *m.NameSpace
-	}
-	return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
-	if m != nil {
-		return m.Pathelement
-	}
-	return nil
-}
-
-type PropertyValue_ReferenceValue_PathElement struct {
-	Type                 *string  `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
-	Id                   *int64   `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
-	Name                 *string  `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
-	*m = PropertyValue_ReferenceValue_PathElement{}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage()    {}
-func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
-	return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
-	xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
-	if m != nil && m.Type != nil {
-		return *m.Type
-	}
-	return ""
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
-	if m != nil && m.Id != nil {
-		return *m.Id
-	}
-	return 0
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-type Property struct {
-	Meaning               *Property_Meaning               `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
-	MeaningUri            *string                         `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"`
-	Name                  *string                         `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
-	Value                 *PropertyValue                  `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
-	Multiple              *bool                           `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
-	Searchable            *bool                           `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
-	FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
-	Locale                *string                         `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
-	XXX_NoUnkeyedLiteral  struct{}                        `json:"-"`
-	XXX_unrecognized      []byte                          `json:"-"`
-	XXX_sizecache         int32                           `json:"-"`
-}
-
-func (m *Property) Reset()         { *m = Property{} }
-func (m *Property) String() string { return proto.CompactTextString(m) }
-func (*Property) ProtoMessage()    {}
-func (*Property) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
-}
-func (m *Property) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Property.Unmarshal(m, b)
-}
-func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Property.Marshal(b, m, deterministic)
-}
-func (dst *Property) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Property.Merge(dst, src)
-}
-func (m *Property) XXX_Size() int {
-	return xxx_messageInfo_Property.Size(m)
-}
-func (m *Property) XXX_DiscardUnknown() {
-	xxx_messageInfo_Property.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Property proto.InternalMessageInfo
-
-const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
-const Default_Property_Searchable bool = false
-const Default_Property_Locale string = "en"
-
-func (m *Property) GetMeaning() Property_Meaning {
-	if m != nil && m.Meaning != nil {
-		return *m.Meaning
-	}
-	return Default_Property_Meaning
-}
-
-func (m *Property) GetMeaningUri() string {
-	if m != nil && m.MeaningUri != nil {
-		return *m.MeaningUri
-	}
-	return ""
-}
-
-func (m *Property) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *Property) GetValue() *PropertyValue {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-func (m *Property) GetMultiple() bool {
-	if m != nil && m.Multiple != nil {
-		return *m.Multiple
-	}
-	return false
-}
-
-func (m *Property) GetSearchable() bool {
-	if m != nil && m.Searchable != nil {
-		return *m.Searchable
-	}
-	return Default_Property_Searchable
-}
-
-func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
-	if m != nil && m.FtsTokenizationOption != nil {
-		return *m.FtsTokenizationOption
-	}
-	return Property_HTML
-}
-
-func (m *Property) GetLocale() string {
-	if m != nil && m.Locale != nil {
-		return *m.Locale
-	}
-	return Default_Property_Locale
-}
-
-type Path struct {
-	Element              []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *Path) Reset()         { *m = Path{} }
-func (m *Path) String() string { return proto.CompactTextString(m) }
-func (*Path) ProtoMessage()    {}
-func (*Path) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
-}
-func (m *Path) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Path.Unmarshal(m, b)
-}
-func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Path.Marshal(b, m, deterministic)
-}
-func (dst *Path) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Path.Merge(dst, src)
-}
-func (m *Path) XXX_Size() int {
-	return xxx_messageInfo_Path.Size(m)
-}
-func (m *Path) XXX_DiscardUnknown() {
-	xxx_messageInfo_Path.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Path proto.InternalMessageInfo
-
-func (m *Path) GetElement() []*Path_Element {
-	if m != nil {
-		return m.Element
-	}
-	return nil
-}
-
-type Path_Element struct {
-	Type                 *string  `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
-	Id                   *int64   `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
-	Name                 *string  `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Path_Element) Reset()         { *m = Path_Element{} }
-func (m *Path_Element) String() string { return proto.CompactTextString(m) }
-func (*Path_Element) ProtoMessage()    {}
-func (*Path_Element) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
-}
-func (m *Path_Element) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Path_Element.Unmarshal(m, b)
-}
-func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
-}
-func (dst *Path_Element) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Path_Element.Merge(dst, src)
-}
-func (m *Path_Element) XXX_Size() int {
-	return xxx_messageInfo_Path_Element.Size(m)
-}
-func (m *Path_Element) XXX_DiscardUnknown() {
-	xxx_messageInfo_Path_Element.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Path_Element proto.InternalMessageInfo
-
-func (m *Path_Element) GetType() string {
-	if m != nil && m.Type != nil {
-		return *m.Type
-	}
-	return ""
-}
-
-func (m *Path_Element) GetId() int64 {
-	if m != nil && m.Id != nil {
-		return *m.Id
-	}
-	return 0
-}
-
-func (m *Path_Element) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-type Reference struct {
-	App                  *string  `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
-	NameSpace            *string  `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
-	Path                 *Path    `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Reference) Reset()         { *m = Reference{} }
-func (m *Reference) String() string { return proto.CompactTextString(m) }
-func (*Reference) ProtoMessage()    {}
-func (*Reference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
-}
-func (m *Reference) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Reference.Unmarshal(m, b)
-}
-func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
-}
-func (dst *Reference) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Reference.Merge(dst, src)
-}
-func (m *Reference) XXX_Size() int {
-	return xxx_messageInfo_Reference.Size(m)
-}
-func (m *Reference) XXX_DiscardUnknown() {
-	xxx_messageInfo_Reference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Reference proto.InternalMessageInfo
-
-func (m *Reference) GetApp() string {
-	if m != nil && m.App != nil {
-		return *m.App
-	}
-	return ""
-}
-
-func (m *Reference) GetNameSpace() string {
-	if m != nil && m.NameSpace != nil {
-		return *m.NameSpace
-	}
-	return ""
-}
-
-func (m *Reference) GetPath() *Path {
-	if m != nil {
-		return m.Path
-	}
-	return nil
-}
-
-type User struct {
-	Email                *string  `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
-	AuthDomain           *string  `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
-	Nickname             *string  `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
-	FederatedIdentity    *string  `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
-	FederatedProvider    *string  `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *User) Reset()         { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage()    {}
-func (*User) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
-}
-func (m *User) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_User.Unmarshal(m, b)
-}
-func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_User.Marshal(b, m, deterministic)
-}
-func (dst *User) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_User.Merge(dst, src)
-}
-func (m *User) XXX_Size() int {
-	return xxx_messageInfo_User.Size(m)
-}
-func (m *User) XXX_DiscardUnknown() {
-	xxx_messageInfo_User.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_User proto.InternalMessageInfo
-
-func (m *User) GetEmail() string {
-	if m != nil && m.Email != nil {
-		return *m.Email
-	}
-	return ""
-}
-
-func (m *User) GetAuthDomain() string {
-	if m != nil && m.AuthDomain != nil {
-		return *m.AuthDomain
-	}
-	return ""
-}
-
-func (m *User) GetNickname() string {
-	if m != nil && m.Nickname != nil {
-		return *m.Nickname
-	}
-	return ""
-}
-
-func (m *User) GetFederatedIdentity() string {
-	if m != nil && m.FederatedIdentity != nil {
-		return *m.FederatedIdentity
-	}
-	return ""
-}
-
-func (m *User) GetFederatedProvider() string {
-	if m != nil && m.FederatedProvider != nil {
-		return *m.FederatedProvider
-	}
-	return ""
-}
-
-type EntityProto struct {
-	Key                  *Reference        `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
-	EntityGroup          *Path             `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
-	Owner                *User             `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
-	Kind                 *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
-	KindUri              *string           `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
-	Property             []*Property       `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
-	RawProperty          []*Property       `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
-	Rank                 *int32            `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *EntityProto) Reset()         { *m = EntityProto{} }
-func (m *EntityProto) String() string { return proto.CompactTextString(m) }
-func (*EntityProto) ProtoMessage()    {}
-func (*EntityProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
-}
-func (m *EntityProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EntityProto.Unmarshal(m, b)
-}
-func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
-}
-func (dst *EntityProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EntityProto.Merge(dst, src)
-}
-func (m *EntityProto) XXX_Size() int {
-	return xxx_messageInfo_EntityProto.Size(m)
-}
-func (m *EntityProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_EntityProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EntityProto proto.InternalMessageInfo
-
-func (m *EntityProto) GetKey() *Reference {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *EntityProto) GetEntityGroup() *Path {
-	if m != nil {
-		return m.EntityGroup
-	}
-	return nil
-}
-
-func (m *EntityProto) GetOwner() *User {
-	if m != nil {
-		return m.Owner
-	}
-	return nil
-}
-
-func (m *EntityProto) GetKind() EntityProto_Kind {
-	if m != nil && m.Kind != nil {
-		return *m.Kind
-	}
-	return EntityProto_GD_CONTACT
-}
-
-func (m *EntityProto) GetKindUri() string {
-	if m != nil && m.KindUri != nil {
-		return *m.KindUri
-	}
-	return ""
-}
-
-func (m *EntityProto) GetProperty() []*Property {
-	if m != nil {
-		return m.Property
-	}
-	return nil
-}
-
-func (m *EntityProto) GetRawProperty() []*Property {
-	if m != nil {
-		return m.RawProperty
-	}
-	return nil
-}
-
-func (m *EntityProto) GetRank() int32 {
-	if m != nil && m.Rank != nil {
-		return *m.Rank
-	}
-	return 0
-}
-
-type CompositeProperty struct {
-	IndexId              *int64   `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
-	Value                []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *CompositeProperty) Reset()         { *m = CompositeProperty{} }
-func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
-func (*CompositeProperty) ProtoMessage()    {}
-func (*CompositeProperty) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
-}
-func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
-}
-func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
-}
-func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompositeProperty.Merge(dst, src)
-}
-func (m *CompositeProperty) XXX_Size() int {
-	return xxx_messageInfo_CompositeProperty.Size(m)
-}
-func (m *CompositeProperty) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo
-
-func (m *CompositeProperty) GetIndexId() int64 {
-	if m != nil && m.IndexId != nil {
-		return *m.IndexId
-	}
-	return 0
-}
-
-func (m *CompositeProperty) GetValue() []string {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-type Index struct {
-	EntityType           *string           `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
-	Ancestor             *bool             `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
-	Property             []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *Index) Reset()         { *m = Index{} }
-func (m *Index) String() string { return proto.CompactTextString(m) }
-func (*Index) ProtoMessage()    {}
-func (*Index) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
-}
-func (m *Index) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Index.Unmarshal(m, b)
-}
-func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Index.Marshal(b, m, deterministic)
-}
-func (dst *Index) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Index.Merge(dst, src)
-}
-func (m *Index) XXX_Size() int {
-	return xxx_messageInfo_Index.Size(m)
-}
-func (m *Index) XXX_DiscardUnknown() {
-	xxx_messageInfo_Index.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Index proto.InternalMessageInfo
-
-func (m *Index) GetEntityType() string {
-	if m != nil && m.EntityType != nil {
-		return *m.EntityType
-	}
-	return ""
-}
-
-func (m *Index) GetAncestor() bool {
-	if m != nil && m.Ancestor != nil {
-		return *m.Ancestor
-	}
-	return false
-}
-
-func (m *Index) GetProperty() []*Index_Property {
-	if m != nil {
-		return m.Property
-	}
-	return nil
-}
-
-type Index_Property struct {
-	Name                 *string                   `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
-	Direction            *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
-	XXX_unrecognized     []byte                    `json:"-"`
-	XXX_sizecache        int32                     `json:"-"`
-}
-
-func (m *Index_Property) Reset()         { *m = Index_Property{} }
-func (m *Index_Property) String() string { return proto.CompactTextString(m) }
-func (*Index_Property) ProtoMessage()    {}
-func (*Index_Property) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
-}
-func (m *Index_Property) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Index_Property.Unmarshal(m, b)
-}
-func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
-}
-func (dst *Index_Property) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Index_Property.Merge(dst, src)
-}
-func (m *Index_Property) XXX_Size() int {
-	return xxx_messageInfo_Index_Property.Size(m)
-}
-func (m *Index_Property) XXX_DiscardUnknown() {
-	xxx_messageInfo_Index_Property.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Index_Property proto.InternalMessageInfo
-
-const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
-
-func (m *Index_Property) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *Index_Property) GetDirection() Index_Property_Direction {
-	if m != nil && m.Direction != nil {
-		return *m.Direction
-	}
-	return Default_Index_Property_Direction
-}
-
-type CompositeIndex struct {
-	AppId                *string               `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
-	Id                   *int64                `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
-	Definition           *Index                `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
-	State                *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
-	OnlyUseIfRequired    *bool                 `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
-	XXX_unrecognized     []byte                `json:"-"`
-	XXX_sizecache        int32                 `json:"-"`
-}
-
-func (m *CompositeIndex) Reset()         { *m = CompositeIndex{} }
-func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndex) ProtoMessage()    {}
-func (*CompositeIndex) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
-}
-func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
-}
-func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
-}
-func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompositeIndex.Merge(dst, src)
-}
-func (m *CompositeIndex) XXX_Size() int {
-	return xxx_messageInfo_CompositeIndex.Size(m)
-}
-func (m *CompositeIndex) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo
-
-const Default_CompositeIndex_OnlyUseIfRequired bool = false
-
-func (m *CompositeIndex) GetAppId() string {
-	if m != nil && m.AppId != nil {
-		return *m.AppId
-	}
-	return ""
-}
-
-func (m *CompositeIndex) GetId() int64 {
-	if m != nil && m.Id != nil {
-		return *m.Id
-	}
-	return 0
-}
-
-func (m *CompositeIndex) GetDefinition() *Index {
-	if m != nil {
-		return m.Definition
-	}
-	return nil
-}
-
-func (m *CompositeIndex) GetState() CompositeIndex_State {
-	if m != nil && m.State != nil {
-		return *m.State
-	}
-	return CompositeIndex_WRITE_ONLY
-}
-
-func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
-	if m != nil && m.OnlyUseIfRequired != nil {
-		return *m.OnlyUseIfRequired
-	}
-	return Default_CompositeIndex_OnlyUseIfRequired
-}
-
-type IndexPostfix struct {
-	IndexValue           []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
-	Key                  *Reference                 `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
-	Before               *bool                      `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
-	XXX_unrecognized     []byte                     `json:"-"`
-	XXX_sizecache        int32                      `json:"-"`
-}
-
-func (m *IndexPostfix) Reset()         { *m = IndexPostfix{} }
-func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix) ProtoMessage()    {}
-func (*IndexPostfix) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
-}
-func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
-}
-func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
-}
-func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_IndexPostfix.Merge(dst, src)
-}
-func (m *IndexPostfix) XXX_Size() int {
-	return xxx_messageInfo_IndexPostfix.Size(m)
-}
-func (m *IndexPostfix) XXX_DiscardUnknown() {
-	xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo
-
-const Default_IndexPostfix_Before bool = true
-
-func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
-	if m != nil {
-		return m.IndexValue
-	}
-	return nil
-}
-
-func (m *IndexPostfix) GetKey() *Reference {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *IndexPostfix) GetBefore() bool {
-	if m != nil && m.Before != nil {
-		return *m.Before
-	}
-	return Default_IndexPostfix_Before
-}
-
-type IndexPostfix_IndexValue struct {
-	PropertyName         *string        `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
-	Value                *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
-	XXX_unrecognized     []byte         `json:"-"`
-	XXX_sizecache        int32          `json:"-"`
-}
-
-func (m *IndexPostfix_IndexValue) Reset()         { *m = IndexPostfix_IndexValue{} }
-func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix_IndexValue) ProtoMessage()    {}
-func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
-}
-func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
-}
-func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
-}
-func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
-}
-func (m *IndexPostfix_IndexValue) XXX_Size() int {
-	return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
-}
-func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo
-
-func (m *IndexPostfix_IndexValue) GetPropertyName() string {
-	if m != nil && m.PropertyName != nil {
-		return *m.PropertyName
-	}
-	return ""
-}
-
-func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-type IndexPosition struct {
-	Key                  *string  `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
-	Before               *bool    `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *IndexPosition) Reset()         { *m = IndexPosition{} }
-func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
-func (*IndexPosition) ProtoMessage()    {}
-func (*IndexPosition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
-}
-func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
-}
-func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
-}
-func (dst *IndexPosition) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_IndexPosition.Merge(dst, src)
-}
-func (m *IndexPosition) XXX_Size() int {
-	return xxx_messageInfo_IndexPosition.Size(m)
-}
-func (m *IndexPosition) XXX_DiscardUnknown() {
-	xxx_messageInfo_IndexPosition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPosition proto.InternalMessageInfo
-
-const Default_IndexPosition_Before bool = true
-
-func (m *IndexPosition) GetKey() string {
-	if m != nil && m.Key != nil {
-		return *m.Key
-	}
-	return ""
-}
-
-func (m *IndexPosition) GetBefore() bool {
-	if m != nil && m.Before != nil {
-		return *m.Before
-	}
-	return Default_IndexPosition_Before
-}
-
-type Snapshot struct {
-	Ts                   *int64   `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Snapshot) Reset()         { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage()    {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Snapshot.Unmarshal(m, b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
-}
-func (dst *Snapshot) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Snapshot.Merge(dst, src)
-}
-func (m *Snapshot) XXX_Size() int {
-	return xxx_messageInfo_Snapshot.Size(m)
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
-	xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-func (m *Snapshot) GetTs() int64 {
-	if m != nil && m.Ts != nil {
-		return *m.Ts
-	}
-	return 0
-}
-
-type InternalHeader struct {
-	Qos                  *string  `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InternalHeader) Reset()         { *m = InternalHeader{} }
-func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
-func (*InternalHeader) ProtoMessage()    {}
-func (*InternalHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
-}
-func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
-}
-func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
-}
-func (dst *InternalHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InternalHeader.Merge(dst, src)
-}
-func (m *InternalHeader) XXX_Size() int {
-	return xxx_messageInfo_InternalHeader.Size(m)
-}
-func (m *InternalHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_InternalHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalHeader proto.InternalMessageInfo
-
-func (m *InternalHeader) GetQos() string {
-	if m != nil && m.Qos != nil {
-		return *m.Qos
-	}
-	return ""
-}
-
-type Transaction struct {
-	Header               *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
-	Handle               *uint64         `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
-	App                  *string         `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
-	MarkChanges          *bool           `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *Transaction) Reset()         { *m = Transaction{} }
-func (m *Transaction) String() string { return proto.CompactTextString(m) }
-func (*Transaction) ProtoMessage()    {}
-func (*Transaction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
-}
-func (m *Transaction) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Transaction.Unmarshal(m, b)
-}
-func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
-}
-func (dst *Transaction) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Transaction.Merge(dst, src)
-}
-func (m *Transaction) XXX_Size() int {
-	return xxx_messageInfo_Transaction.Size(m)
-}
-func (m *Transaction) XXX_DiscardUnknown() {
-	xxx_messageInfo_Transaction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Transaction proto.InternalMessageInfo
-
-const Default_Transaction_MarkChanges bool = false
-
-func (m *Transaction) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *Transaction) GetHandle() uint64 {
-	if m != nil && m.Handle != nil {
-		return *m.Handle
-	}
-	return 0
-}
-
-func (m *Transaction) GetApp() string {
-	if m != nil && m.App != nil {
-		return *m.App
-	}
-	return ""
-}
-
-func (m *Transaction) GetMarkChanges() bool {
-	if m != nil && m.MarkChanges != nil {
-		return *m.MarkChanges
-	}
-	return Default_Transaction_MarkChanges
-}
-
-type Query struct {
-	Header               *InternalHeader   `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
-	App                  *string           `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
-	NameSpace            *string           `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
-	Kind                 *string           `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
-	Ancestor             *Reference        `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
-	Filter               []*Query_Filter   `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
-	SearchQuery          *string           `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
-	Order                []*Query_Order    `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
-	Hint                 *Query_Hint       `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
-	Count                *int32            `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
-	Offset               *int32            `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
-	Limit                *int32            `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
-	CompiledCursor       *CompiledCursor   `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
-	EndCompiledCursor    *CompiledCursor   `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
-	CompositeIndex       []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
-	RequirePerfectPlan   *bool             `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
-	KeysOnly             *bool             `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
-	Transaction          *Transaction      `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
-	Compile              *bool             `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
-	FailoverMs           *int64            `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
-	Strong               *bool             `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
-	PropertyName         []string          `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
-	GroupByPropertyName  []string          `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
-	Distinct             *bool             `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
-	MinSafeTimeSeconds   *int64            `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
-	SafeReplicaName      []string          `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
-	PersistOffset        *bool             `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *Query) Reset()         { *m = Query{} }
-func (m *Query) String() string { return proto.CompactTextString(m) }
-func (*Query) ProtoMessage()    {}
-func (*Query) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
-}
-func (m *Query) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Query.Unmarshal(m, b)
-}
-func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Query.Marshal(b, m, deterministic)
-}
-func (dst *Query) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Query.Merge(dst, src)
-}
-func (m *Query) XXX_Size() int {
-	return xxx_messageInfo_Query.Size(m)
-}
-func (m *Query) XXX_DiscardUnknown() {
-	xxx_messageInfo_Query.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query proto.InternalMessageInfo
-
-const Default_Query_Offset int32 = 0
-const Default_Query_RequirePerfectPlan bool = false
-const Default_Query_KeysOnly bool = false
-const Default_Query_Compile bool = false
-const Default_Query_PersistOffset bool = false
-
-func (m *Query) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *Query) GetApp() string {
-	if m != nil && m.App != nil {
-		return *m.App
-	}
-	return ""
-}
-
-func (m *Query) GetNameSpace() string {
-	if m != nil && m.NameSpace != nil {
-		return *m.NameSpace
-	}
-	return ""
-}
-
-func (m *Query) GetKind() string {
-	if m != nil && m.Kind != nil {
-		return *m.Kind
-	}
-	return ""
-}
-
-func (m *Query) GetAncestor() *Reference {
-	if m != nil {
-		return m.Ancestor
-	}
-	return nil
-}
-
-func (m *Query) GetFilter() []*Query_Filter {
-	if m != nil {
-		return m.Filter
-	}
-	return nil
-}
-
-func (m *Query) GetSearchQuery() string {
-	if m != nil && m.SearchQuery != nil {
-		return *m.SearchQuery
-	}
-	return ""
-}
-
-func (m *Query) GetOrder() []*Query_Order {
-	if m != nil {
-		return m.Order
-	}
-	return nil
-}
-
-func (m *Query) GetHint() Query_Hint {
-	if m != nil && m.Hint != nil {
-		return *m.Hint
-	}
-	return Query_ORDER_FIRST
-}
-
-func (m *Query) GetCount() int32 {
-	if m != nil && m.Count != nil {
-		return *m.Count
-	}
-	return 0
-}
-
-func (m *Query) GetOffset() int32 {
-	if m != nil && m.Offset != nil {
-		return *m.Offset
-	}
-	return Default_Query_Offset
-}
-
-func (m *Query) GetLimit() int32 {
-	if m != nil && m.Limit != nil {
-		return *m.Limit
-	}
-	return 0
-}
-
-func (m *Query) GetCompiledCursor() *CompiledCursor {
-	if m != nil {
-		return m.CompiledCursor
-	}
-	return nil
-}
-
-func (m *Query) GetEndCompiledCursor() *CompiledCursor {
-	if m != nil {
-		return m.EndCompiledCursor
-	}
-	return nil
-}
-
-func (m *Query) GetCompositeIndex() []*CompositeIndex {
-	if m != nil {
-		return m.CompositeIndex
-	}
-	return nil
-}
-
-func (m *Query) GetRequirePerfectPlan() bool {
-	if m != nil && m.RequirePerfectPlan != nil {
-		return *m.RequirePerfectPlan
-	}
-	return Default_Query_RequirePerfectPlan
-}
-
-func (m *Query) GetKeysOnly() bool {
-	if m != nil && m.KeysOnly != nil {
-		return *m.KeysOnly
-	}
-	return Default_Query_KeysOnly
-}
-
-func (m *Query) GetTransaction() *Transaction {
-	if m != nil {
-		return m.Transaction
-	}
-	return nil
-}
-
-func (m *Query) GetCompile() bool {
-	if m != nil && m.Compile != nil {
-		return *m.Compile
-	}
-	return Default_Query_Compile
-}
-
-func (m *Query) GetFailoverMs() int64 {
-	if m != nil && m.FailoverMs != nil {
-		return *m.FailoverMs
-	}
-	return 0
-}
-
-func (m *Query) GetStrong() bool {
-	if m != nil && m.Strong != nil {
-		return *m.Strong
-	}
-	return false
-}
-
-func (m *Query) GetPropertyName() []string {
-	if m != nil {
-		return m.PropertyName
-	}
-	return nil
-}
-
-func (m *Query) GetGroupByPropertyName() []string {
-	if m != nil {
-		return m.GroupByPropertyName
-	}
-	return nil
-}
-
-func (m *Query) GetDistinct() bool {
-	if m != nil && m.Distinct != nil {
-		return *m.Distinct
-	}
-	return false
-}
-
-func (m *Query) GetMinSafeTimeSeconds() int64 {
-	if m != nil && m.MinSafeTimeSeconds != nil {
-		return *m.MinSafeTimeSeconds
-	}
-	return 0
-}
-
-func (m *Query) GetSafeReplicaName() []string {
-	if m != nil {
-		return m.SafeReplicaName
-	}
-	return nil
-}
-
-func (m *Query) GetPersistOffset() bool {
-	if m != nil && m.PersistOffset != nil {
-		return *m.PersistOffset
-	}
-	return Default_Query_PersistOffset
-}
-
-type Query_Filter struct {
-	Op                   *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
-	Property             []*Property            `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
-	XXX_unrecognized     []byte                 `json:"-"`
-	XXX_sizecache        int32                  `json:"-"`
-}
-
-func (m *Query_Filter) Reset()         { *m = Query_Filter{} }
-func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
-func (*Query_Filter) ProtoMessage()    {}
-func (*Query_Filter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
-}
-func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
-}
-func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
-}
-func (dst *Query_Filter) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Query_Filter.Merge(dst, src)
-}
-func (m *Query_Filter) XXX_Size() int {
-	return xxx_messageInfo_Query_Filter.Size(m)
-}
-func (m *Query_Filter) XXX_DiscardUnknown() {
-	xxx_messageInfo_Query_Filter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query_Filter proto.InternalMessageInfo
-
-func (m *Query_Filter) GetOp() Query_Filter_Operator {
-	if m != nil && m.Op != nil {
-		return *m.Op
-	}
-	return Query_Filter_LESS_THAN
-}
-
-func (m *Query_Filter) GetProperty() []*Property {
-	if m != nil {
-		return m.Property
-	}
-	return nil
-}
-
-type Query_Order struct {
-	Property             *string                `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
-	Direction            *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
-	XXX_unrecognized     []byte                 `json:"-"`
-	XXX_sizecache        int32                  `json:"-"`
-}
-
-func (m *Query_Order) Reset()         { *m = Query_Order{} }
-func (m *Query_Order) String() string { return proto.CompactTextString(m) }
-func (*Query_Order) ProtoMessage()    {}
-func (*Query_Order) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
-}
-func (m *Query_Order) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Query_Order.Unmarshal(m, b)
-}
-func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
-}
-func (dst *Query_Order) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Query_Order.Merge(dst, src)
-}
-func (m *Query_Order) XXX_Size() int {
-	return xxx_messageInfo_Query_Order.Size(m)
-}
-func (m *Query_Order) XXX_DiscardUnknown() {
-	xxx_messageInfo_Query_Order.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query_Order proto.InternalMessageInfo
-
-const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
-
-func (m *Query_Order) GetProperty() string {
-	if m != nil && m.Property != nil {
-		return *m.Property
-	}
-	return ""
-}
-
-func (m *Query_Order) GetDirection() Query_Order_Direction {
-	if m != nil && m.Direction != nil {
-		return *m.Direction
-	}
-	return Default_Query_Order_Direction
-}
-
-type CompiledQuery struct {
-	Primaryscan          *CompiledQuery_PrimaryScan     `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
-	Mergejoinscan        []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
-	IndexDef             *Index                         `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
-	Offset               *int32                         `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
-	Limit                *int32                         `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
-	KeysOnly             *bool                          `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
-	PropertyName         []string                       `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
-	DistinctInfixSize    *int32                         `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
-	Entityfilter         *CompiledQuery_EntityFilter    `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
-	XXX_unrecognized     []byte                         `json:"-"`
-	XXX_sizecache        int32                          `json:"-"`
-}
-
-func (m *CompiledQuery) Reset()         { *m = CompiledQuery{} }
-func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery) ProtoMessage()    {}
-func (*CompiledQuery) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
-}
-func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
-}
-func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompiledQuery.Merge(dst, src)
-}
-func (m *CompiledQuery) XXX_Size() int {
-	return xxx_messageInfo_CompiledQuery.Size(m)
-}
-func (m *CompiledQuery) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo
-
-const Default_CompiledQuery_Offset int32 = 0
-
-func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
-	if m != nil {
-		return m.Primaryscan
-	}
-	return nil
-}
-
-func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
-	if m != nil {
-		return m.Mergejoinscan
-	}
-	return nil
-}
-
-func (m *CompiledQuery) GetIndexDef() *Index {
-	if m != nil {
-		return m.IndexDef
-	}
-	return nil
-}
-
-func (m *CompiledQuery) GetOffset() int32 {
-	if m != nil && m.Offset != nil {
-		return *m.Offset
-	}
-	return Default_CompiledQuery_Offset
-}
-
-func (m *CompiledQuery) GetLimit() int32 {
-	if m != nil && m.Limit != nil {
-		return *m.Limit
-	}
-	return 0
-}
-
-func (m *CompiledQuery) GetKeysOnly() bool {
-	if m != nil && m.KeysOnly != nil {
-		return *m.KeysOnly
-	}
-	return false
-}
-
-func (m *CompiledQuery) GetPropertyName() []string {
-	if m != nil {
-		return m.PropertyName
-	}
-	return nil
-}
-
-func (m *CompiledQuery) GetDistinctInfixSize() int32 {
-	if m != nil && m.DistinctInfixSize != nil {
-		return *m.DistinctInfixSize
-	}
-	return 0
-}
-
-func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
-	if m != nil {
-		return m.Entityfilter
-	}
-	return nil
-}
-
-type CompiledQuery_PrimaryScan struct {
-	IndexName                  *string  `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"`
-	StartKey                   *string  `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
-	StartInclusive             *bool    `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"`
-	EndKey                     *string  `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"`
-	EndInclusive               *bool    `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"`
-	StartPostfixValue          []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
-	EndPostfixValue            []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
-	EndUnappliedLogTimestampUs *int64   `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
-	XXX_NoUnkeyedLiteral       struct{} `json:"-"`
-	XXX_unrecognized           []byte   `json:"-"`
-	XXX_sizecache              int32    `json:"-"`
-}
-
-func (m *CompiledQuery_PrimaryScan) Reset()         { *m = CompiledQuery_PrimaryScan{} }
-func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_PrimaryScan) ProtoMessage()    {}
-func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
-	return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo
-
-func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
-	if m != nil && m.IndexName != nil {
-		return *m.IndexName
-	}
-	return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
-	if m != nil && m.StartKey != nil {
-		return *m.StartKey
-	}
-	return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
-	if m != nil && m.StartInclusive != nil {
-		return *m.StartInclusive
-	}
-	return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
-	if m != nil && m.EndKey != nil {
-		return *m.EndKey
-	}
-	return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
-	if m != nil && m.EndInclusive != nil {
-		return *m.EndInclusive
-	}
-	return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
-	if m != nil {
-		return m.StartPostfixValue
-	}
-	return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
-	if m != nil {
-		return m.EndPostfixValue
-	}
-	return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
-	if m != nil && m.EndUnappliedLogTimestampUs != nil {
-		return *m.EndUnappliedLogTimestampUs
-	}
-	return 0
-}
-
-type CompiledQuery_MergeJoinScan struct {
-	IndexName            *string  `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
-	PrefixValue          []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
-	ValuePrefix          *bool    `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *CompiledQuery_MergeJoinScan) Reset()         { *m = CompiledQuery_MergeJoinScan{} }
-func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_MergeJoinScan) ProtoMessage()    {}
-func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
-	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo
-
-const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
-
-func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
-	if m != nil && m.IndexName != nil {
-		return *m.IndexName
-	}
-	return ""
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
-	if m != nil {
-		return m.PrefixValue
-	}
-	return nil
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
-	if m != nil && m.ValuePrefix != nil {
-		return *m.ValuePrefix
-	}
-	return Default_CompiledQuery_MergeJoinScan_ValuePrefix
-}
-
-type CompiledQuery_EntityFilter struct {
-	Distinct             *bool      `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
-	Kind                 *string    `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
-	Ancestor             *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
-	XXX_unrecognized     []byte     `json:"-"`
-	XXX_sizecache        int32      `json:"-"`
-}
-
-func (m *CompiledQuery_EntityFilter) Reset()         { *m = CompiledQuery_EntityFilter{} }
-func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_EntityFilter) ProtoMessage()    {}
-func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
-}
-func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
-}
-func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
-}
-func (m *CompiledQuery_EntityFilter) XXX_Size() int {
-	return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
-}
-func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo
-
-const Default_CompiledQuery_EntityFilter_Distinct bool = false
-
-func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
-	if m != nil && m.Distinct != nil {
-		return *m.Distinct
-	}
-	return Default_CompiledQuery_EntityFilter_Distinct
-}
-
-func (m *CompiledQuery_EntityFilter) GetKind() string {
-	if m != nil && m.Kind != nil {
-		return *m.Kind
-	}
-	return ""
-}
-
-func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
-	if m != nil {
-		return m.Ancestor
-	}
-	return nil
-}
-
-type CompiledCursor struct {
-	Position             *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
-	XXX_unrecognized     []byte                   `json:"-"`
-	XXX_sizecache        int32                    `json:"-"`
-}
-
-func (m *CompiledCursor) Reset()         { *m = CompiledCursor{} }
-func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor) ProtoMessage()    {}
-func (*CompiledCursor) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
-}
-func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
-}
-func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompiledCursor.Merge(dst, src)
-}
-func (m *CompiledCursor) XXX_Size() int {
-	return xxx_messageInfo_CompiledCursor.Size(m)
-}
-func (m *CompiledCursor) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo
-
-func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
-	if m != nil {
-		return m.Position
-	}
-	return nil
-}
-
-type CompiledCursor_Position struct {
-	StartKey             *string                               `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
-	Indexvalue           []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
-	Key                  *Reference                            `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
-	StartInclusive       *bool                                 `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                              `json:"-"`
-	XXX_unrecognized     []byte                                `json:"-"`
-	XXX_sizecache        int32                                 `json:"-"`
-}
-
-func (m *CompiledCursor_Position) Reset()         { *m = CompiledCursor_Position{} }
-func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position) ProtoMessage()    {}
-func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
-}
-func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
-}
-func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
-}
-func (m *CompiledCursor_Position) XXX_Size() int {
-	return xxx_messageInfo_CompiledCursor_Position.Size(m)
-}
-func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo
-
-const Default_CompiledCursor_Position_StartInclusive bool = true
-
-func (m *CompiledCursor_Position) GetStartKey() string {
-	if m != nil && m.StartKey != nil {
-		return *m.StartKey
-	}
-	return ""
-}
-
-func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
-	if m != nil {
-		return m.Indexvalue
-	}
-	return nil
-}
-
-func (m *CompiledCursor_Position) GetKey() *Reference {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *CompiledCursor_Position) GetStartInclusive() bool {
-	if m != nil && m.StartInclusive != nil {
-		return *m.StartInclusive
-	}
-	return Default_CompiledCursor_Position_StartInclusive
-}
-
-type CompiledCursor_Position_IndexValue struct {
-	Property             *string        `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
-	Value                *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
-	XXX_unrecognized     []byte         `json:"-"`
-	XXX_sizecache        int32          `json:"-"`
-}
-
-func (m *CompiledCursor_Position_IndexValue) Reset()         { *m = CompiledCursor_Position_IndexValue{} }
-func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position_IndexValue) ProtoMessage()    {}
-func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
-	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo
-
-func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
-	if m != nil && m.Property != nil {
-		return *m.Property
-	}
-	return ""
-}
-
-func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-type Cursor struct {
-	Cursor               *uint64  `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
-	App                  *string  `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Cursor) Reset()         { *m = Cursor{} }
-func (m *Cursor) String() string { return proto.CompactTextString(m) }
-func (*Cursor) ProtoMessage()    {}
-func (*Cursor) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
-}
-func (m *Cursor) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Cursor.Unmarshal(m, b)
-}
-func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
-}
-func (dst *Cursor) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Cursor.Merge(dst, src)
-}
-func (m *Cursor) XXX_Size() int {
-	return xxx_messageInfo_Cursor.Size(m)
-}
-func (m *Cursor) XXX_DiscardUnknown() {
-	xxx_messageInfo_Cursor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cursor proto.InternalMessageInfo
-
-func (m *Cursor) GetCursor() uint64 {
-	if m != nil && m.Cursor != nil {
-		return *m.Cursor
-	}
-	return 0
-}
-
-func (m *Cursor) GetApp() string {
-	if m != nil && m.App != nil {
-		return *m.App
-	}
-	return ""
-}
-
-type Error struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Error) Reset()         { *m = Error{} }
-func (m *Error) String() string { return proto.CompactTextString(m) }
-func (*Error) ProtoMessage()    {}
-func (*Error) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
-}
-func (m *Error) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Error.Unmarshal(m, b)
-}
-func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Error.Marshal(b, m, deterministic)
-}
-func (dst *Error) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Error.Merge(dst, src)
-}
-func (m *Error) XXX_Size() int {
-	return xxx_messageInfo_Error.Size(m)
-}
-func (m *Error) XXX_DiscardUnknown() {
-	xxx_messageInfo_Error.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Error proto.InternalMessageInfo
-
-type Cost struct {
-	IndexWrites             *int32           `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
-	IndexWriteBytes         *int32           `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"`
-	EntityWrites            *int32           `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"`
-	EntityWriteBytes        *int32           `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"`
-	Commitcost              *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
-	ApproximateStorageDelta *int32           `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
-	IdSequenceUpdates       *int32           `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
-	XXX_NoUnkeyedLiteral    struct{}         `json:"-"`
-	XXX_unrecognized        []byte           `json:"-"`
-	XXX_sizecache           int32            `json:"-"`
-}
-
-func (m *Cost) Reset()         { *m = Cost{} }
-func (m *Cost) String() string { return proto.CompactTextString(m) }
-func (*Cost) ProtoMessage()    {}
-func (*Cost) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
-}
-func (m *Cost) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Cost.Unmarshal(m, b)
-}
-func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
-}
-func (dst *Cost) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Cost.Merge(dst, src)
-}
-func (m *Cost) XXX_Size() int {
-	return xxx_messageInfo_Cost.Size(m)
-}
-func (m *Cost) XXX_DiscardUnknown() {
-	xxx_messageInfo_Cost.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cost proto.InternalMessageInfo
-
-func (m *Cost) GetIndexWrites() int32 {
-	if m != nil && m.IndexWrites != nil {
-		return *m.IndexWrites
-	}
-	return 0
-}
-
-func (m *Cost) GetIndexWriteBytes() int32 {
-	if m != nil && m.IndexWriteBytes != nil {
-		return *m.IndexWriteBytes
-	}
-	return 0
-}
-
-func (m *Cost) GetEntityWrites() int32 {
-	if m != nil && m.EntityWrites != nil {
-		return *m.EntityWrites
-	}
-	return 0
-}
-
-func (m *Cost) GetEntityWriteBytes() int32 {
-	if m != nil && m.EntityWriteBytes != nil {
-		return *m.EntityWriteBytes
-	}
-	return 0
-}
-
-func (m *Cost) GetCommitcost() *Cost_CommitCost {
-	if m != nil {
-		return m.Commitcost
-	}
-	return nil
-}
-
-func (m *Cost) GetApproximateStorageDelta() int32 {
-	if m != nil && m.ApproximateStorageDelta != nil {
-		return *m.ApproximateStorageDelta
-	}
-	return 0
-}
-
-func (m *Cost) GetIdSequenceUpdates() int32 {
-	if m != nil && m.IdSequenceUpdates != nil {
-		return *m.IdSequenceUpdates
-	}
-	return 0
-}
-
-type Cost_CommitCost struct {
-	RequestedEntityPuts    *int32   `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
-	RequestedEntityDeletes *int32   `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
-	XXX_NoUnkeyedLiteral   struct{} `json:"-"`
-	XXX_unrecognized       []byte   `json:"-"`
-	XXX_sizecache          int32    `json:"-"`
-}
-
-func (m *Cost_CommitCost) Reset()         { *m = Cost_CommitCost{} }
-func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
-func (*Cost_CommitCost) ProtoMessage()    {}
-func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
-}
-func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
-}
-func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
-}
-func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
-}
-func (m *Cost_CommitCost) XXX_Size() int {
-	return xxx_messageInfo_Cost_CommitCost.Size(m)
-}
-func (m *Cost_CommitCost) XXX_DiscardUnknown() {
-	xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo
-
-func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
-	if m != nil && m.RequestedEntityPuts != nil {
-		return *m.RequestedEntityPuts
-	}
-	return 0
-}
-
-func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
-	if m != nil && m.RequestedEntityDeletes != nil {
-		return *m.RequestedEntityDeletes
-	}
-	return 0
-}
-
-type GetRequest struct {
-	Header               *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
-	Key                  []*Reference    `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
-	Transaction          *Transaction    `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
-	FailoverMs           *int64          `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
-	Strong               *bool           `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
-	AllowDeferred        *bool           `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *GetRequest) Reset()         { *m = GetRequest{} }
-func (m *GetRequest) String() string { return proto.CompactTextString(m) }
-func (*GetRequest) ProtoMessage()    {}
-func (*GetRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
-}
-func (m *GetRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GetRequest.Unmarshal(m, b)
-}
-func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetRequest.Merge(dst, src)
-}
-func (m *GetRequest) XXX_Size() int {
-	return xxx_messageInfo_GetRequest.Size(m)
-}
-func (m *GetRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetRequest proto.InternalMessageInfo
-
-const Default_GetRequest_AllowDeferred bool = false
-
-func (m *GetRequest) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *GetRequest) GetKey() []*Reference {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *GetRequest) GetTransaction() *Transaction {
-	if m != nil {
-		return m.Transaction
-	}
-	return nil
-}
-
-func (m *GetRequest) GetFailoverMs() int64 {
-	if m != nil && m.FailoverMs != nil {
-		return *m.FailoverMs
-	}
-	return 0
-}
-
-func (m *GetRequest) GetStrong() bool {
-	if m != nil && m.Strong != nil {
-		return *m.Strong
-	}
-	return false
-}
-
-func (m *GetRequest) GetAllowDeferred() bool {
-	if m != nil && m.AllowDeferred != nil {
-		return *m.AllowDeferred
-	}
-	return Default_GetRequest_AllowDeferred
-}
-
-type GetResponse struct {
-	Entity               []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
-	Deferred             []*Reference          `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
-	InOrder              *bool                 `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
-	XXX_unrecognized     []byte                `json:"-"`
-	XXX_sizecache        int32                 `json:"-"`
-}
-
-func (m *GetResponse) Reset()         { *m = GetResponse{} }
-func (m *GetResponse) String() string { return proto.CompactTextString(m) }
-func (*GetResponse) ProtoMessage()    {}
-func (*GetResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
-}
-func (m *GetResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GetResponse.Unmarshal(m, b)
-}
-func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetResponse.Merge(dst, src)
-}
-func (m *GetResponse) XXX_Size() int {
-	return xxx_messageInfo_GetResponse.Size(m)
-}
-func (m *GetResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetResponse proto.InternalMessageInfo
-
-const Default_GetResponse_InOrder bool = true
-
-func (m *GetResponse) GetEntity() []*GetResponse_Entity {
-	if m != nil {
-		return m.Entity
-	}
-	return nil
-}
-
-func (m *GetResponse) GetDeferred() []*Reference {
-	if m != nil {
-		return m.Deferred
-	}
-	return nil
-}
-
-func (m *GetResponse) GetInOrder() bool {
-	if m != nil && m.InOrder != nil {
-		return *m.InOrder
-	}
-	return Default_GetResponse_InOrder
-}
-
-type GetResponse_Entity struct {
-	Entity               *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
-	Key                  *Reference   `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
-	Version              *int64       `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
-	XXX_unrecognized     []byte       `json:"-"`
-	XXX_sizecache        int32        `json:"-"`
-}
-
-func (m *GetResponse_Entity) Reset()         { *m = GetResponse_Entity{} }
-func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
-func (*GetResponse_Entity) ProtoMessage()    {}
-func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
-}
-func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
-}
-func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
-}
-func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
-}
-func (m *GetResponse_Entity) XXX_Size() int {
-	return xxx_messageInfo_GetResponse_Entity.Size(m)
-}
-func (m *GetResponse_Entity) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo
-
-func (m *GetResponse_Entity) GetEntity() *EntityProto {
-	if m != nil {
-		return m.Entity
-	}
-	return nil
-}
-
-func (m *GetResponse_Entity) GetKey() *Reference {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *GetResponse_Entity) GetVersion() int64 {
-	if m != nil && m.Version != nil {
-		return *m.Version
-	}
-	return 0
-}
-
-type PutRequest struct {
-	Header               *InternalHeader          `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
-	Entity               []*EntityProto           `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
-	Transaction          *Transaction             `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
-	CompositeIndex       []*CompositeIndex        `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
-	Trusted              *bool                    `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
-	Force                *bool                    `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
-	MarkChanges          *bool                    `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
-	Snapshot             []*Snapshot              `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
-	AutoIdPolicy         *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
-	XXX_unrecognized     []byte                   `json:"-"`
-	XXX_sizecache        int32                    `json:"-"`
-}
-
-func (m *PutRequest) Reset()         { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage()    {}
-func (*PutRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
-}
-func (m *PutRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PutRequest.Unmarshal(m, b)
-}
-func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
-}
-func (dst *PutRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PutRequest.Merge(dst, src)
-}
-func (m *PutRequest) XXX_Size() int {
-	return xxx_messageInfo_PutRequest.Size(m)
-}
-func (m *PutRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_PutRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutRequest proto.InternalMessageInfo
-
-const Default_PutRequest_Trusted bool = false
-const Default_PutRequest_Force bool = false
-const Default_PutRequest_MarkChanges bool = false
-const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
-
-func (m *PutRequest) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *PutRequest) GetEntity() []*EntityProto {
-	if m != nil {
-		return m.Entity
-	}
-	return nil
-}
-
-func (m *PutRequest) GetTransaction() *Transaction {
-	if m != nil {
-		return m.Transaction
-	}
-	return nil
-}
-
-func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
-	if m != nil {
-		return m.CompositeIndex
-	}
-	return nil
-}
-
-func (m *PutRequest) GetTrusted() bool {
-	if m != nil && m.Trusted != nil {
-		return *m.Trusted
-	}
-	return Default_PutRequest_Trusted
-}
-
-func (m *PutRequest) GetForce() bool {
-	if m != nil && m.Force != nil {
-		return *m.Force
-	}
-	return Default_PutRequest_Force
-}
-
-func (m *PutRequest) GetMarkChanges() bool {
-	if m != nil && m.MarkChanges != nil {
-		return *m.MarkChanges
-	}
-	return Default_PutRequest_MarkChanges
-}
-
-func (m *PutRequest) GetSnapshot() []*Snapshot {
-	if m != nil {
-		return m.Snapshot
-	}
-	return nil
-}
-
-func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
-	if m != nil && m.AutoIdPolicy != nil {
-		return *m.AutoIdPolicy
-	}
-	return Default_PutRequest_AutoIdPolicy
-}
-
-type PutResponse struct {
-	Key                  []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
-	Cost                 *Cost        `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
-	Version              []int64      `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
-	XXX_unrecognized     []byte       `json:"-"`
-	XXX_sizecache        int32        `json:"-"`
-}
-
-func (m *PutResponse) Reset()         { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage()    {}
-func (*PutResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
-}
-func (m *PutResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PutResponse.Unmarshal(m, b)
-}
-func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
-}
-func (dst *PutResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PutResponse.Merge(dst, src)
-}
-func (m *PutResponse) XXX_Size() int {
-	return xxx_messageInfo_PutResponse.Size(m)
-}
-func (m *PutResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_PutResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutResponse proto.InternalMessageInfo
-
-func (m *PutResponse) GetKey() []*Reference {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *PutResponse) GetCost() *Cost {
-	if m != nil {
-		return m.Cost
-	}
-	return nil
-}
-
-func (m *PutResponse) GetVersion() []int64 {
-	if m != nil {
-		return m.Version
-	}
-	return nil
-}
-
-type TouchRequest struct {
-	Header               *InternalHeader   `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
-	Key                  []*Reference      `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
-	CompositeIndex       []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
-	Force                *bool             `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
-	Snapshot             []*Snapshot       `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *TouchRequest) Reset()         { *m = TouchRequest{} }
-func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
-func (*TouchRequest) ProtoMessage()    {}
-func (*TouchRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
-}
-func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
-}
-func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
-}
-func (dst *TouchRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TouchRequest.Merge(dst, src)
-}
-func (m *TouchRequest) XXX_Size() int {
-	return xxx_messageInfo_TouchRequest.Size(m)
-}
-func (m *TouchRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_TouchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TouchRequest proto.InternalMessageInfo
-
-const Default_TouchRequest_Force bool = false
-
-func (m *TouchRequest) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *TouchRequest) GetKey() []*Reference {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
-	if m != nil {
-		return m.CompositeIndex
-	}
-	return nil
-}
-
-func (m *TouchRequest) GetForce() bool {
-	if m != nil && m.Force != nil {
-		return *m.Force
-	}
-	return Default_TouchRequest_Force
-}
-
-func (m *TouchRequest) GetSnapshot() []*Snapshot {
-	if m != nil {
-		return m.Snapshot
-	}
-	return nil
-}
-
-type TouchResponse struct {
-	Cost                 *Cost    `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *TouchResponse) Reset()         { *m = TouchResponse{} }
-func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
-func (*TouchResponse) ProtoMessage()    {}
-func (*TouchResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
-}
-func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
-}
-func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
-}
-func (dst *TouchResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TouchResponse.Merge(dst, src)
-}
-func (m *TouchResponse) XXX_Size() int {
-	return xxx_messageInfo_TouchResponse.Size(m)
-}
-func (m *TouchResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_TouchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TouchResponse proto.InternalMessageInfo
-
-func (m *TouchResponse) GetCost() *Cost {
-	if m != nil {
-		return m.Cost
-	}
-	return nil
-}
-
-type DeleteRequest struct {
-	Header               *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
-	Key                  []*Reference    `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
-	Transaction          *Transaction    `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
-	Trusted              *bool           `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
-	Force                *bool           `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
-	MarkChanges          *bool           `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
-	Snapshot             []*Snapshot     `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *DeleteRequest) Reset()         { *m = DeleteRequest{} }
-func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRequest) ProtoMessage()    {}
-func (*DeleteRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
-}
-func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
-}
-func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
-}
-func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DeleteRequest.Merge(dst, src)
-}
-func (m *DeleteRequest) XXX_Size() int {
-	return xxx_messageInfo_DeleteRequest.Size(m)
-}
-func (m *DeleteRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
-
-const Default_DeleteRequest_Trusted bool = false
-const Default_DeleteRequest_Force bool = false
-const Default_DeleteRequest_MarkChanges bool = false
-
-func (m *DeleteRequest) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *DeleteRequest) GetKey() []*Reference {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *DeleteRequest) GetTransaction() *Transaction {
-	if m != nil {
-		return m.Transaction
-	}
-	return nil
-}
-
-func (m *DeleteRequest) GetTrusted() bool {
-	if m != nil && m.Trusted != nil {
-		return *m.Trusted
-	}
-	return Default_DeleteRequest_Trusted
-}
-
-func (m *DeleteRequest) GetForce() bool {
-	if m != nil && m.Force != nil {
-		return *m.Force
-	}
-	return Default_DeleteRequest_Force
-}
-
-func (m *DeleteRequest) GetMarkChanges() bool {
-	if m != nil && m.MarkChanges != nil {
-		return *m.MarkChanges
-	}
-	return Default_DeleteRequest_MarkChanges
-}
-
-func (m *DeleteRequest) GetSnapshot() []*Snapshot {
-	if m != nil {
-		return m.Snapshot
-	}
-	return nil
-}
-
-type DeleteResponse struct {
-	Cost                 *Cost    `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
-	Version              []int64  `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DeleteResponse) Reset()         { *m = DeleteResponse{} }
-func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteResponse) ProtoMessage()    {}
-func (*DeleteResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
-}
-func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
-}
-func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
-}
-func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DeleteResponse.Merge(dst, src)
-}
-func (m *DeleteResponse) XXX_Size() int {
-	return xxx_messageInfo_DeleteResponse.Size(m)
-}
-func (m *DeleteResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
-
-func (m *DeleteResponse) GetCost() *Cost {
-	if m != nil {
-		return m.Cost
-	}
-	return nil
-}
-
-func (m *DeleteResponse) GetVersion() []int64 {
-	if m != nil {
-		return m.Version
-	}
-	return nil
-}
-
-type NextRequest struct {
-	Header               *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
-	Cursor               *Cursor         `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
-	Count                *int32          `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
-	Offset               *int32          `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
-	Compile              *bool           `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *NextRequest) Reset()         { *m = NextRequest{} }
-func (m *NextRequest) String() string { return proto.CompactTextString(m) }
-func (*NextRequest) ProtoMessage()    {}
-func (*NextRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
-}
-func (m *NextRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_NextRequest.Unmarshal(m, b)
-}
-func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
-}
-func (dst *NextRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_NextRequest.Merge(dst, src)
-}
-func (m *NextRequest) XXX_Size() int {
-	return xxx_messageInfo_NextRequest.Size(m)
-}
-func (m *NextRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_NextRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NextRequest proto.InternalMessageInfo
-
-const Default_NextRequest_Offset int32 = 0
-const Default_NextRequest_Compile bool = false
-
-func (m *NextRequest) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *NextRequest) GetCursor() *Cursor {
-	if m != nil {
-		return m.Cursor
-	}
-	return nil
-}
-
-func (m *NextRequest) GetCount() int32 {
-	if m != nil && m.Count != nil {
-		return *m.Count
-	}
-	return 0
-}
-
-func (m *NextRequest) GetOffset() int32 {
-	if m != nil && m.Offset != nil {
-		return *m.Offset
-	}
-	return Default_NextRequest_Offset
-}
-
-func (m *NextRequest) GetCompile() bool {
-	if m != nil && m.Compile != nil {
-		return *m.Compile
-	}
-	return Default_NextRequest_Compile
-}
-
-type QueryResult struct {
-	Cursor               *Cursor           `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
-	Result               []*EntityProto    `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
-	SkippedResults       *int32            `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
-	MoreResults          *bool             `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
-	KeysOnly             *bool             `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
-	IndexOnly            *bool             `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
-	SmallOps             *bool             `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
-	CompiledQuery        *CompiledQuery    `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
-	CompiledCursor       *CompiledCursor   `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
-	Index                []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
-	Version              []int64           `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *QueryResult) Reset()         { *m = QueryResult{} }
-func (m *QueryResult) String() string { return proto.CompactTextString(m) }
-func (*QueryResult) ProtoMessage()    {}
-func (*QueryResult) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
-}
-func (m *QueryResult) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_QueryResult.Unmarshal(m, b)
-}
-func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
-}
-func (dst *QueryResult) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_QueryResult.Merge(dst, src)
-}
-func (m *QueryResult) XXX_Size() int {
-	return xxx_messageInfo_QueryResult.Size(m)
-}
-func (m *QueryResult) XXX_DiscardUnknown() {
-	xxx_messageInfo_QueryResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryResult proto.InternalMessageInfo
-
-func (m *QueryResult) GetCursor() *Cursor {
-	if m != nil {
-		return m.Cursor
-	}
-	return nil
-}
-
-func (m *QueryResult) GetResult() []*EntityProto {
-	if m != nil {
-		return m.Result
-	}
-	return nil
-}
-
-func (m *QueryResult) GetSkippedResults() int32 {
-	if m != nil && m.SkippedResults != nil {
-		return *m.SkippedResults
-	}
-	return 0
-}
-
-func (m *QueryResult) GetMoreResults() bool {
-	if m != nil && m.MoreResults != nil {
-		return *m.MoreResults
-	}
-	return false
-}
-
-func (m *QueryResult) GetKeysOnly() bool {
-	if m != nil && m.KeysOnly != nil {
-		return *m.KeysOnly
-	}
-	return false
-}
-
-func (m *QueryResult) GetIndexOnly() bool {
-	if m != nil && m.IndexOnly != nil {
-		return *m.IndexOnly
-	}
-	return false
-}
-
-func (m *QueryResult) GetSmallOps() bool {
-	if m != nil && m.SmallOps != nil {
-		return *m.SmallOps
-	}
-	return false
-}
-
-func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
-	if m != nil {
-		return m.CompiledQuery
-	}
-	return nil
-}
-
-func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
-	if m != nil {
-		return m.CompiledCursor
-	}
-	return nil
-}
-
-func (m *QueryResult) GetIndex() []*CompositeIndex {
-	if m != nil {
-		return m.Index
-	}
-	return nil
-}
-
-func (m *QueryResult) GetVersion() []int64 {
-	if m != nil {
-		return m.Version
-	}
-	return nil
-}
-
-type AllocateIdsRequest struct {
-	Header               *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
-	ModelKey             *Reference      `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
-	Size                 *int64          `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
-	Max                  *int64          `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
-	Reserve              []*Reference    `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AllocateIdsRequest) Reset()         { *m = AllocateIdsRequest{} }
-func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsRequest) ProtoMessage()    {}
-func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
-}
-func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
-}
-func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
-}
-func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
-}
-func (m *AllocateIdsRequest) XXX_Size() int {
-	return xxx_messageInfo_AllocateIdsRequest.Size(m)
-}
-func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo
-
-func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AllocateIdsRequest) GetModelKey() *Reference {
-	if m != nil {
-		return m.ModelKey
-	}
-	return nil
-}
-
-func (m *AllocateIdsRequest) GetSize() int64 {
-	if m != nil && m.Size != nil {
-		return *m.Size
-	}
-	return 0
-}
-
-func (m *AllocateIdsRequest) GetMax() int64 {
-	if m != nil && m.Max != nil {
-		return *m.Max
-	}
-	return 0
-}
-
-func (m *AllocateIdsRequest) GetReserve() []*Reference {
-	if m != nil {
-		return m.Reserve
-	}
-	return nil
-}
-
-type AllocateIdsResponse struct {
-	Start                *int64   `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
-	End                  *int64   `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
-	Cost                 *Cost    `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AllocateIdsResponse) Reset()         { *m = AllocateIdsResponse{} }
-func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsResponse) ProtoMessage()    {}
-func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
-}
-func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
-}
-func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
-}
-func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
-}
-func (m *AllocateIdsResponse) XXX_Size() int {
-	return xxx_messageInfo_AllocateIdsResponse.Size(m)
-}
-func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo
-
-func (m *AllocateIdsResponse) GetStart() int64 {
-	if m != nil && m.Start != nil {
-		return *m.Start
-	}
-	return 0
-}
-
-func (m *AllocateIdsResponse) GetEnd() int64 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-func (m *AllocateIdsResponse) GetCost() *Cost {
-	if m != nil {
-		return m.Cost
-	}
-	return nil
-}
-
-type CompositeIndices struct {
-	Index                []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *CompositeIndices) Reset()         { *m = CompositeIndices{} }
-func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndices) ProtoMessage()    {}
-func (*CompositeIndices) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
-}
-func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
-}
-func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
-}
-func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompositeIndices.Merge(dst, src)
-}
-func (m *CompositeIndices) XXX_Size() int {
-	return xxx_messageInfo_CompositeIndices.Size(m)
-}
-func (m *CompositeIndices) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
-
-func (m *CompositeIndices) GetIndex() []*CompositeIndex {
-	if m != nil {
-		return m.Index
-	}
-	return nil
-}
-
-type AddActionsRequest struct {
-	Header               *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
-	Transaction          *Transaction    `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
-	Action               []*Action       `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AddActionsRequest) Reset()         { *m = AddActionsRequest{} }
-func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
-func (*AddActionsRequest) ProtoMessage()    {}
-func (*AddActionsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
-}
-func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
-}
-func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
-}
-func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AddActionsRequest.Merge(dst, src)
-}
-func (m *AddActionsRequest) XXX_Size() int {
-	return xxx_messageInfo_AddActionsRequest.Size(m)
-}
-func (m *AddActionsRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
-
-func (m *AddActionsRequest) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AddActionsRequest) GetTransaction() *Transaction {
-	if m != nil {
-		return m.Transaction
-	}
-	return nil
-}
-
-func (m *AddActionsRequest) GetAction() []*Action {
-	if m != nil {
-		return m.Action
-	}
-	return nil
-}
-
-type AddActionsResponse struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AddActionsResponse) Reset()         { *m = AddActionsResponse{} }
-func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
-func (*AddActionsResponse) ProtoMessage()    {}
-func (*AddActionsResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
-}
-func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
-}
-func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
-}
-func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AddActionsResponse.Merge(dst, src)
-}
-func (m *AddActionsResponse) XXX_Size() int {
-	return xxx_messageInfo_AddActionsResponse.Size(m)
-}
-func (m *AddActionsResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
-
-type BeginTransactionRequest struct {
-	Header               *InternalHeader                          `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
-	App                  *string                                  `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
-	AllowMultipleEg      *bool                                    `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
-	DatabaseId           *string                                  `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
-	Mode                 *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
-	PreviousTransaction  *Transaction                             `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                                 `json:"-"`
-	XXX_unrecognized     []byte                                   `json:"-"`
-	XXX_sizecache        int32                                    `json:"-"`
-}
-
-func (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }
-func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
-func (*BeginTransactionRequest) ProtoMessage()    {}
-func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
-}
-func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
-}
-func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
-}
-func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
-}
-func (m *BeginTransactionRequest) XXX_Size() int {
-	return xxx_messageInfo_BeginTransactionRequest.Size(m)
-}
-func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo
-
-const Default_BeginTransactionRequest_AllowMultipleEg bool = false
-const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
-
-func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *BeginTransactionRequest) GetApp() string {
-	if m != nil && m.App != nil {
-		return *m.App
-	}
-	return ""
-}
-
-func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
-	if m != nil && m.AllowMultipleEg != nil {
-		return *m.AllowMultipleEg
-	}
-	return Default_BeginTransactionRequest_AllowMultipleEg
-}
-
-func (m *BeginTransactionRequest) GetDatabaseId() string {
-	if m != nil && m.DatabaseId != nil {
-		return *m.DatabaseId
-	}
-	return ""
-}
-
-func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode {
-	if m != nil && m.Mode != nil {
-		return *m.Mode
-	}
-	return Default_BeginTransactionRequest_Mode
-}
-
-func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
-	if m != nil {
-		return m.PreviousTransaction
-	}
-	return nil
-}
-
-type CommitResponse struct {
-	Cost                 *Cost                     `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
-	Version              []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
-	XXX_unrecognized     []byte                    `json:"-"`
-	XXX_sizecache        int32                     `json:"-"`
-}
-
-func (m *CommitResponse) Reset()         { *m = CommitResponse{} }
-func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse) ProtoMessage()    {}
-func (*CommitResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
-}
-func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
-}
-func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
-}
-func (dst *CommitResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CommitResponse.Merge(dst, src)
-}
-func (m *CommitResponse) XXX_Size() int {
-	return xxx_messageInfo_CommitResponse.Size(m)
-}
-func (m *CommitResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_CommitResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
-
-func (m *CommitResponse) GetCost() *Cost {
-	if m != nil {
-		return m.Cost
-	}
-	return nil
-}
-
-func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
-	if m != nil {
-		return m.Version
-	}
-	return nil
-}
-
-type CommitResponse_Version struct {
-	RootEntityKey        *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
-	Version              *int64     `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
-	XXX_unrecognized     []byte     `json:"-"`
-	XXX_sizecache        int32      `json:"-"`
-}
-
-func (m *CommitResponse_Version) Reset()         { *m = CommitResponse_Version{} }
-func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse_Version) ProtoMessage()    {}
-func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
-	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
-}
-func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
-}
-func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
-}
-func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
-}
-func (m *CommitResponse_Version) XXX_Size() int {
-	return xxx_messageInfo_CommitResponse_Version.Size(m)
-}
-func (m *CommitResponse_Version) XXX_DiscardUnknown() {
-	xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
-
-func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
-	if m != nil {
-		return m.RootEntityKey
-	}
-	return nil
-}
-
-func (m *CommitResponse_Version) GetVersion() int64 {
-	if m != nil && m.Version != nil {
-		return *m.Version
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterType((*Action)(nil), "appengine.Action")
-	proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue")
-	proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue")
-	proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue")
-	proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue")
-	proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement")
-	proto.RegisterType((*Property)(nil), "appengine.Property")
-	proto.RegisterType((*Path)(nil), "appengine.Path")
-	proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element")
-	proto.RegisterType((*Reference)(nil), "appengine.Reference")
-	proto.RegisterType((*User)(nil), "appengine.User")
-	proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto")
-	proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty")
-	proto.RegisterType((*Index)(nil), "appengine.Index")
-	proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property")
-	proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex")
-	proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix")
-	proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue")
-	proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition")
-	proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot")
-	proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader")
-	proto.RegisterType((*Transaction)(nil), "appengine.Transaction")
-	proto.RegisterType((*Query)(nil), "appengine.Query")
-	proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter")
-	proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order")
-	proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery")
-	proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan")
-	proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan")
-	proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter")
-	proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor")
-	proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position")
-	proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue")
-	proto.RegisterType((*Cursor)(nil), "appengine.Cursor")
-	proto.RegisterType((*Error)(nil), "appengine.Error")
-	proto.RegisterType((*Cost)(nil), "appengine.Cost")
-	proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost")
-	proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest")
-	proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse")
-	proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity")
-	proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest")
-	proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse")
-	proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest")
-	proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse")
-	proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest")
-	proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse")
-	proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest")
-	proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult")
-	proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest")
-	proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse")
-	proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices")
-	proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest")
-	proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse")
-	proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest")
-	proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse")
-	proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version")
-}
-
-func init() {
-	proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
-}
-
-var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
-	// 4156 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
-	0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
-	0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48,
-	0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46,
-	0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8,
-	0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24,
-	0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd,
-	0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f,
-	0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74,
-	0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac,
-	0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8,
-	0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9,
-	0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22,
-	0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56,
-	0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b,
-	0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05,
-	0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c,
-	0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2,
-	0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16,
-	0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3,
-	0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce,
-	0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67,
-	0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66,
-	0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13,
-	0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c,
-	0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6,
-	0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a,
-	0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f,
-	0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15,
-	0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a,
-	0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b,
-	0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17,
-	0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad,
-	0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50,
-	0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6,
-	0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b,
-	0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4,
-	0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2,
-	0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc,
-	0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e,
-	0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62,
-	0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee,
-	0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f,
-	0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4,
-	0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02,
-	0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae,
-	0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94,
-	0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f,
-	0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a,
-	0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52,
-	0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc,
-	0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92,
-	0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9,
-	0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50,
-	0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9,
-	0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23,
-	0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f,
-	0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87,
-	0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda,
-	0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b,
-	0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d,
-	0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f,
-	0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b,
-	0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9,
-	0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0,
-	0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68,
-	0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b,
-	0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79,
-	0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63,
-	0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0,
-	0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1,
-	0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10,
-	0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b,
-	0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b,
-	0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08,
-	0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72,
-	0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23,
-	0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91,
-	0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f,
-	0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93,
-	0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c,
-	0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa,
-	0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f,
-	0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef,
-	0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4,
-	0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90,
-	0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f,
-	0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86,
-	0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d,
-	0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee,
-	0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c,
-	0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1,
-	0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7,
-	0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80,
-	0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c,
-	0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21,
-	0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6,
-	0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26,
-	0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba,
-	0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c,
-	0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2,
-	0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8,
-	0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90,
-	0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91,
-	0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58,
-	0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c,
-	0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b,
-	0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f,
-	0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02,
-	0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22,
-	0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b,
-	0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0,
-	0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18,
-	0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8,
-	0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b,
-	0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e,
-	0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84,
-	0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0,
-	0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec,
-	0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7,
-	0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60,
-	0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad,
-	0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4,
-	0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76,
-	0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0,
-	0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba,
-	0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5,
-	0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26,
-	0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60,
-	0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e,
-	0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33,
-	0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e,
-	0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e,
-	0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7,
-	0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45,
-	0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92,
-	0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb,
-	0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc,
-	0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43,
-	0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2,
-	0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5,
-	0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40,
-	0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa,
-	0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc,
-	0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8,
-	0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7,
-	0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6,
-	0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c,
-	0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a,
-	0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e,
-	0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8,
-	0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a,
-	0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55,
-	0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0,
-	0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e,
-	0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78,
-	0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8,
-	0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1,
-	0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a,
-	0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60,
-	0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf,
-	0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c,
-	0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d,
-	0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb,
-	0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f,
-	0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe,
-	0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1,
-	0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80,
-	0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2,
-	0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f,
-	0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d,
-	0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90,
-	0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb,
-	0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe,
-	0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7,
-	0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31,
-	0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe,
-	0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd,
-	0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99,
-	0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41,
-	0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1,
-	0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81,
-	0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63,
-	0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3,
-	0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff,
-	0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f,
-	0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14,
-	0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2,
-	0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4,
-	0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1,
-	0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b,
-	0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9,
-	0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18,
-	0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e,
-	0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9,
-	0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95,
-	0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30,
-	0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c,
-	0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73,
-	0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79,
-	0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59,
-	0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95,
-	0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4,
-	0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74,
-	0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49,
-	0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e,
-	0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42,
-	0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c,
-	0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b,
-	0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca,
-	0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c,
-	0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69,
-	0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a,
-	0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65,
-	0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96,
-	0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4,
-	0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1,
-	0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85,
-	0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf,
-	0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55,
-	0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58,
-	0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6,
-	0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e,
-	0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb,
-	0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd,
-	0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20,
-	0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63,
-	0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b,
-	0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27,
-	0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61,
-	0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44,
-	0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd,
-	0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91,
-	0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3,
-	0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0,
-	0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4,
-	0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74,
-	0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41,
-	0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02,
-	0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1,
-	0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06,
-	0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe,
-	0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59,
-	0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde,
-	0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89,
-	0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0,
-	0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb,
-	0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62,
-	0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5,
-	0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90,
-	0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02,
-	0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06,
-	0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91,
-	0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41,
-	0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37,
-	0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3,
-	0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71,
-	0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd,
-	0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
deleted file mode 100644
index 497b4d9a..00000000
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
+++ /dev/null
@@ -1,551 +0,0 @@
-syntax = "proto2";
-option go_package = "datastore";
-
-package appengine;
-
-message Action{}
-
-message PropertyValue {
-  optional int64 int64Value = 1;
-  optional bool booleanValue = 2;
-  optional string stringValue = 3;
-  optional double doubleValue = 4;
-
-  optional group PointValue = 5 {
-    required double x = 6;
-    required double y = 7;
-  }
-
-  optional group UserValue = 8 {
-    required string email = 9;
-    required string auth_domain = 10;
-    optional string nickname = 11;
-    optional string federated_identity = 21;
-    optional string federated_provider = 22;
-  }
-
-  optional group ReferenceValue = 12 {
-    required string app = 13;
-    optional string name_space = 20;
-    repeated group PathElement = 14 {
-      required string type = 15;
-      optional int64 id = 16;
-      optional string name = 17;
-    }
-  }
-}
-
-message Property {
-  enum Meaning {
-    NO_MEANING = 0;
-    BLOB = 14;
-    TEXT = 15;
-    BYTESTRING = 16;
-
-    ATOM_CATEGORY = 1;
-    ATOM_LINK = 2;
-    ATOM_TITLE = 3;
-    ATOM_CONTENT = 4;
-    ATOM_SUMMARY = 5;
-    ATOM_AUTHOR = 6;
-
-    GD_WHEN = 7;
-    GD_EMAIL = 8;
-    GEORSS_POINT = 9;
-    GD_IM = 10;
-
-    GD_PHONENUMBER = 11;
-    GD_POSTALADDRESS = 12;
-
-    GD_RATING = 13;
-
-    BLOBKEY = 17;
-    ENTITY_PROTO = 19;
-
-    INDEX_VALUE = 18;
-  };
-
-  optional Meaning meaning = 1 [default = NO_MEANING];
-  optional string meaning_uri = 2;
-
-  required string name = 3;
-
-  required PropertyValue value = 5;
-
-  required bool multiple = 4;
-
-  optional bool searchable = 6 [default=false];
-
-  enum FtsTokenizationOption {
-    HTML = 1;
-    ATOM = 2;
-  }
-
-  optional FtsTokenizationOption fts_tokenization_option = 8;
-
-  optional string locale = 9 [default = "en"];
-}
-
-message Path {
-  repeated group Element = 1 {
-    required string type = 2;
-    optional int64 id = 3;
-    optional string name = 4;
-  }
-}
-
-message Reference {
-  required string app = 13;
-  optional string name_space = 20;
-  required Path path = 14;
-}
-
-message User {
-  required string email = 1;
-  required string auth_domain = 2;
-  optional string nickname = 3;
-  optional string federated_identity = 6;
-  optional string federated_provider = 7;
-}
-
-message EntityProto {
-  required Reference key = 13;
-  required Path entity_group = 16;
-  optional User owner = 17;
-
-  enum Kind {
-    GD_CONTACT = 1;
-    GD_EVENT = 2;
-    GD_MESSAGE = 3;
-  }
-  optional Kind kind = 4;
-  optional string kind_uri = 5;
-
-  repeated Property property = 14;
-  repeated Property raw_property = 15;
-
-  optional int32 rank = 18;
-}
-
-message CompositeProperty {
-  required int64 index_id = 1;
-  repeated string value = 2;
-}
-
-message Index {
-  required string entity_type = 1;
-  required bool ancestor = 5;
-  repeated group Property = 2 {
-    required string name = 3;
-    enum Direction {
-      ASCENDING = 1;
-      DESCENDING = 2;
-    }
-    optional Direction direction = 4 [default = ASCENDING];
-  }
-}
-
-message CompositeIndex {
-  required string app_id = 1;
-  required int64 id = 2;
-  required Index definition = 3;
-
-  enum State {
-    WRITE_ONLY = 1;
-    READ_WRITE = 2;
-    DELETED = 3;
-    ERROR = 4;
-  }
-  required State state = 4;
-
-  optional bool only_use_if_required = 6 [default = false];
-}
-
-message IndexPostfix {
-  message IndexValue {
-    required string property_name = 1;
-    required PropertyValue value = 2;
-  }
-
-  repeated IndexValue index_value = 1;
-
-  optional Reference key = 2;
-
-  optional bool before = 3 [default=true];
-}
-
-message IndexPosition {
-  optional string key = 1;
-
-  optional bool before = 2 [default=true];
-}
-
-message Snapshot {
-  enum Status {
-    INACTIVE = 0;
-    ACTIVE = 1;
-  }
-
-  required int64 ts = 1;
-}
-
-message InternalHeader {
-  optional string qos = 1;
-}
-
-message Transaction {
-  optional InternalHeader header = 4;
-  required fixed64 handle = 1;
-  required string app = 2;
-  optional bool mark_changes = 3 [default = false];
-}
-
-message Query {
-  optional InternalHeader header = 39;
-
-  required string app = 1;
-  optional string name_space = 29;
-
-  optional string kind = 3;
-  optional Reference ancestor = 17;
-
-  repeated group Filter = 4 {
-    enum Operator {
-      LESS_THAN = 1;
-      LESS_THAN_OR_EQUAL = 2;
-      GREATER_THAN = 3;
-      GREATER_THAN_OR_EQUAL = 4;
-      EQUAL = 5;
-      IN = 6;
-      EXISTS = 7;
-    }
-
-    required Operator op = 6;
-    repeated Property property = 14;
-  }
-
-  optional string search_query = 8;
-
-  repeated group Order = 9 {
-    enum Direction {
-      ASCENDING = 1;
-      DESCENDING = 2;
-    }
-
-    required string property = 10;
-    optional Direction direction = 11 [default = ASCENDING];
-  }
-
-  enum Hint {
-    ORDER_FIRST = 1;
-    ANCESTOR_FIRST = 2;
-    FILTER_FIRST = 3;
-  }
-  optional Hint hint = 18;
-
-  optional int32 count = 23;
-
-  optional int32 offset = 12 [default = 0];
-
-  optional int32 limit = 16;
-
-  optional CompiledCursor compiled_cursor = 30;
-  optional CompiledCursor end_compiled_cursor = 31;
-
-  repeated CompositeIndex composite_index = 19;
-
-  optional bool require_perfect_plan = 20 [default = false];
-
-  optional bool keys_only = 21 [default = false];
-
-  optional Transaction transaction = 22;
-
-  optional bool compile = 25 [default = false];
-
-  optional int64 failover_ms = 26;
-
-  optional bool strong = 32;
-
-  repeated string property_name = 33;
-
-  repeated string group_by_property_name = 34;
-
-  optional bool distinct = 24;
-
-  optional int64 min_safe_time_seconds = 35;
-
-  repeated string safe_replica_name = 36;
-
-  optional bool persist_offset = 37 [default=false];
-}
-
-message CompiledQuery {
-  required group PrimaryScan = 1 {
-    optional string index_name = 2;
-
-    optional string start_key = 3;
-    optional bool start_inclusive = 4;
-    optional string end_key = 5;
-    optional bool end_inclusive = 6;
-
-    repeated string start_postfix_value = 22;
-    repeated string end_postfix_value = 23;
-
-    optional int64 end_unapplied_log_timestamp_us = 19;
-  }
-
-  repeated group MergeJoinScan = 7 {
-    required string index_name = 8;
-
-    repeated string prefix_value = 9;
-
-    optional bool value_prefix = 20 [default=false];
-  }
-
-  optional Index index_def = 21;
-
-  optional int32 offset = 10 [default = 0];
-
-  optional int32 limit = 11;
-
-  required bool keys_only = 12;
-
-  repeated string property_name = 24;
-
-  optional int32 distinct_infix_size = 25;
-
-  optional group EntityFilter = 13 {
-    optional bool distinct = 14 [default=false];
-
-    optional string kind = 17;
-    optional Reference ancestor = 18;
-  }
-}
-
-message CompiledCursor {
-  optional group Position = 2 {
-    optional string start_key = 27;
-
-    repeated group IndexValue = 29 {
-      optional string property = 30;
-      required PropertyValue value = 31;
-    }
-
-    optional Reference key = 32;
-
-    optional bool start_inclusive = 28 [default=true];
-  }
-}
-
-message Cursor {
-  required fixed64 cursor = 1;
-
-  optional string app = 2;
-}
-
-message Error {
-  enum ErrorCode {
-    BAD_REQUEST = 1;
-    CONCURRENT_TRANSACTION = 2;
-    INTERNAL_ERROR = 3;
-    NEED_INDEX = 4;
-    TIMEOUT = 5;
-    PERMISSION_DENIED = 6;
-    BIGTABLE_ERROR = 7;
-    COMMITTED_BUT_STILL_APPLYING = 8;
-    CAPABILITY_DISABLED = 9;
-    TRY_ALTERNATE_BACKEND = 10;
-    SAFE_TIME_TOO_OLD = 11;
-  }
-}
-
-message Cost {
-  optional int32 index_writes = 1;
-  optional int32 index_write_bytes = 2;
-  optional int32 entity_writes = 3;
-  optional int32 entity_write_bytes = 4;
-  optional group CommitCost = 5 {
-    optional int32 requested_entity_puts = 6;
-    optional int32 requested_entity_deletes = 7;
-  };
-  optional int32 approximate_storage_delta = 8;
-  optional int32 id_sequence_updates = 9;
-}
-
-message GetRequest {
-  optional InternalHeader header = 6;
-
-  repeated Reference key = 1;
-  optional Transaction transaction = 2;
-
-  optional int64 failover_ms = 3;
-
-  optional bool strong = 4;
-
-  optional bool allow_deferred = 5 [default=false];
-}
-
-message GetResponse {
-  repeated group Entity = 1 {
-    optional EntityProto entity = 2;
-    optional Reference key = 4;
-
-    optional int64 version = 3;
-  }
-
-  repeated Reference deferred = 5;
-
-  optional bool in_order = 6 [default=true];
-}
-
-message PutRequest {
-  optional InternalHeader header = 11;
-
-  repeated EntityProto entity = 1;
-  optional Transaction transaction = 2;
-  repeated CompositeIndex composite_index = 3;
-
-  optional bool trusted = 4 [default = false];
-
-  optional bool force = 7 [default = false];
-
-  optional bool mark_changes = 8 [default = false];
-  repeated Snapshot snapshot = 9;
-
-  enum AutoIdPolicy {
-    CURRENT = 0;
-    SEQUENTIAL = 1;
-  }
-  optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
-}
-
-message PutResponse {
-  repeated Reference key = 1;
-  optional Cost cost = 2;
-  repeated int64 version = 3;
-}
-
-message TouchRequest {
-  optional InternalHeader header = 10;
-
-  repeated Reference key = 1;
-  repeated CompositeIndex composite_index = 2;
-  optional bool force = 3 [default = false];
-  repeated Snapshot snapshot = 9;
-}
-
-message TouchResponse {
-  optional Cost cost = 1;
-}
-
-message DeleteRequest {
-  optional InternalHeader header = 10;
-
-  repeated Reference key = 6;
-  optional Transaction transaction = 5;
-
-  optional bool trusted = 4 [default = false];
-
-  optional bool force = 7 [default = false];
-
-  optional bool mark_changes = 8 [default = false];
-  repeated Snapshot snapshot = 9;
-}
-
-message DeleteResponse {
-  optional Cost cost = 1;
-  repeated int64 version = 3;
-}
-
-message NextRequest {
-  optional InternalHeader header = 5;
-
-  required Cursor cursor = 1;
-  optional int32 count = 2;
-
-  optional int32 offset = 4 [default = 0];
-
-  optional bool compile = 3 [default = false];
-}
-
-message QueryResult {
-  optional Cursor cursor = 1;
-
-  repeated EntityProto result = 2;
-
-  optional int32 skipped_results = 7;
-
-  required bool more_results = 3;
-
-  optional bool keys_only = 4;
-
-  optional bool index_only = 9;
-
-  optional bool small_ops = 10;
-
-  optional CompiledQuery compiled_query = 5;
-
-  optional CompiledCursor compiled_cursor = 6;
-
-  repeated CompositeIndex index = 8;
-
-  repeated int64 version = 11;
-}
-
-message AllocateIdsRequest {
-  optional InternalHeader header = 4;
-
-  optional Reference model_key = 1;
-
-  optional int64 size = 2;
-
-  optional int64 max = 3;
-
-  repeated Reference reserve = 5;
-}
-
-message AllocateIdsResponse {
-  required int64 start = 1;
-  required int64 end = 2;
-  optional Cost cost = 3;
-}
-
-message CompositeIndices {
-  repeated CompositeIndex index = 1;
-}
-
-message AddActionsRequest {
-  optional InternalHeader header = 3;
-
-  required Transaction transaction = 1;
-  repeated Action action = 2;
-}
-
-message AddActionsResponse {
-}
-
-message BeginTransactionRequest {
-  optional InternalHeader header = 3;
-
-  required string app = 1;
-  optional bool allow_multiple_eg = 2 [default = false];
-  optional string database_id = 4;
-
-  enum TransactionMode {
-    UNKNOWN = 0;
-    READ_ONLY = 1;
-    READ_WRITE = 2;
-  }
-  optional TransactionMode mode = 5 [default = UNKNOWN];
-
-  optional Transaction previous_transaction = 7;
-}
-
-message CommitResponse {
-  optional Cost cost = 1;
-
-  repeated group Version = 3 {
-    required Reference root_entity_key = 4;
-    required int64 version = 5;
-  }
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
deleted file mode 100644
index 0f95aa91..00000000
--- a/vendor/google.golang.org/appengine/internal/identity.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
-	"context"
-	"os"
-)
-
-var (
-	// This is set to true in identity_classic.go, which is behind the appengine build tag.
-	// The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
-	// the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
-	// first-gen runtime. See IsStandard below for the second-gen check.
-	appengineStandard bool
-
-	// This is set to true in identity_flex.go, which is behind the appenginevm build tag.
-	appengineFlex bool
-)
-
-// AppID is the implementation of the wrapper function of the same name in
-// ../identity.go. See that file for commentary.
-func AppID(c context.Context) string {
-	return appID(FullyQualifiedAppID(c))
-}
-
-// IsStandard is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsStandard() bool {
-	// appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
-	// second-gen (>= Go 1.11).
-	return appengineStandard || IsSecondGen()
-}
-
-// IsSecondGen is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsSecondGen() bool {
-	// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
-	return os.Getenv("GAE_ENV") == "standard"
-}
-
-// IsFlex is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsFlex() bool {
-	return appengineFlex
-}
-
-// IsAppEngine is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsAppEngine() bool {
-	return IsStandard() || IsFlex()
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
deleted file mode 100644
index 5ad3548b..00000000
--- a/vendor/google.golang.org/appengine/internal/identity_classic.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-//go:build appengine
-// +build appengine
-
-package internal
-
-import (
-	"context"
-
-	"appengine"
-)
-
-func init() {
-	appengineStandard = true
-}
-
-func DefaultVersionHostname(ctx context.Context) string {
-	c := fromContext(ctx)
-	if c == nil {
-		panic(errNotAppEngineContext)
-	}
-	return appengine.DefaultVersionHostname(c)
-}
-
-func Datacenter(_ context.Context) string { return appengine.Datacenter() }
-func ServerSoftware() string              { return appengine.ServerSoftware() }
-func InstanceID() string                  { return appengine.InstanceID() }
-func IsDevAppServer() bool                { return appengine.IsDevAppServer() }
-
-func RequestID(ctx context.Context) string {
-	c := fromContext(ctx)
-	if c == nil {
-		panic(errNotAppEngineContext)
-	}
-	return appengine.RequestID(c)
-}
-
-func ModuleName(ctx context.Context) string {
-	c := fromContext(ctx)
-	if c == nil {
-		panic(errNotAppEngineContext)
-	}
-	return appengine.ModuleName(c)
-}
-func VersionID(ctx context.Context) string {
-	c := fromContext(ctx)
-	if c == nil {
-		panic(errNotAppEngineContext)
-	}
-	return appengine.VersionID(c)
-}
-
-func fullyQualifiedAppID(ctx context.Context) string {
-	c := fromContext(ctx)
-	if c == nil {
-		panic(errNotAppEngineContext)
-	}
-	return c.FullyQualifiedAppID()
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
deleted file mode 100644
index 4201b6b5..00000000
--- a/vendor/google.golang.org/appengine/internal/identity_flex.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2018 Google LLC. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-//go:build appenginevm
-// +build appenginevm
-
-package internal
-
-func init() {
-	appengineFlex = true
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
deleted file mode 100644
index 18ddda3a..00000000
--- a/vendor/google.golang.org/appengine/internal/identity_vm.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-//go:build !appengine
-// +build !appengine
-
-package internal
-
-import (
-	"context"
-	"log"
-	"net/http"
-	"os"
-	"strings"
-)
-
-// These functions are implementations of the wrapper functions
-// in ../appengine/identity.go. See that file for commentary.
-
-const (
-	hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
-	hRequestLogId           = "X-AppEngine-Request-Log-Id"
-	hDatacenter             = "X-AppEngine-Datacenter"
-)
-
-func ctxHeaders(ctx context.Context) http.Header {
-	c := fromContext(ctx)
-	if c == nil {
-		return nil
-	}
-	return c.Request().Header
-}
-
-func DefaultVersionHostname(ctx context.Context) string {
-	return ctxHeaders(ctx).Get(hDefaultVersionHostname)
-}
-
-func RequestID(ctx context.Context) string {
-	return ctxHeaders(ctx).Get(hRequestLogId)
-}
-
-func Datacenter(ctx context.Context) string {
-	if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
-		return dc
-	}
-	// If the header isn't set, read zone from the metadata service.
-	// It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
-	zone, err := getMetadata("instance/zone")
-	if err != nil {
-		log.Printf("Datacenter: %v", err)
-		return ""
-	}
-	parts := strings.Split(string(zone), "/")
-	if len(parts) == 0 {
-		return ""
-	}
-	return parts[len(parts)-1]
-}
-
-func ServerSoftware() string {
-	// TODO(dsymonds): Remove fallback when we've verified this.
-	if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
-		return s
-	}
-	if s := os.Getenv("GAE_ENV"); s != "" {
-		return s
-	}
-	return "Google App Engine/1.x.x"
-}
-
-// TODO(dsymonds): Remove the metadata fetches.
-
-func ModuleName(_ context.Context) string {
-	if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
-		return s
-	}
-	if s := os.Getenv("GAE_SERVICE"); s != "" {
-		return s
-	}
-	return string(mustGetMetadata("instance/attributes/gae_backend_name"))
-}
-
-func VersionID(_ context.Context) string {
-	if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
-		return s1 + "." + s2
-	}
-	if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
-		return s1 + "." + s2
-	}
-	return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
-}
-
-func InstanceID() string {
-	if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
-		return s
-	}
-	if s := os.Getenv("GAE_INSTANCE"); s != "" {
-		return s
-	}
-	return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
-}
-
-func partitionlessAppID() string {
-	// gae_project has everything except the partition prefix.
-	if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
-		return appID
-	}
-	if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
-		return project
-	}
-	return string(mustGetMetadata("instance/attributes/gae_project"))
-}
-
-func fullyQualifiedAppID(_ context.Context) string {
-	if s := os.Getenv("GAE_APPLICATION"); s != "" {
-		return s
-	}
-	appID := partitionlessAppID()
-
-	part := os.Getenv("GAE_PARTITION")
-	if part == "" {
-		part = string(mustGetMetadata("instance/attributes/gae_partition"))
-	}
-
-	if part != "" {
-		appID = part + "~" + appID
-	}
-	return appID
-}
-
-func IsDevAppServer() bool {
-	return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev"
-}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
deleted file mode 100644
index 051ea398..00000000
--- a/vendor/google.golang.org/appengine/internal/internal.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package internal provides support for package appengine.
-//
-// Programs should not use this package directly. Its API is not stable.
-// Use packages appengine and appengine/* instead.
-package internal
-
-import (
-	"fmt"
-
-	"github.com/golang/protobuf/proto"
-
-	remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-// errorCodeMaps is a map of service name to the error code map for the service.
-var errorCodeMaps = make(map[string]map[int32]string)
-
-// RegisterErrorCodeMap is called from API implementations to register their
-// error code map. This should only be called from init functions.
-func RegisterErrorCodeMap(service string, m map[int32]string) {
-	errorCodeMaps[service] = m
-}
-
-type timeoutCodeKey struct {
-	service string
-	code    int32
-}
-
-// timeoutCodes is the set of service+code pairs that represent timeouts.
-var timeoutCodes = make(map[timeoutCodeKey]bool)
-
-func RegisterTimeoutErrorCode(service string, code int32) {
-	timeoutCodes[timeoutCodeKey{service, code}] = true
-}
-
-// APIError is the type returned by appengine.Context's Call method
-// when an API call fails in an API-specific way. This may be, for instance,
-// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
-type APIError struct {
-	Service string
-	Detail  string
-	Code    int32 // API-specific error code
-}
-
-func (e *APIError) Error() string {
-	if e.Code == 0 {
-		if e.Detail == "" {
-			return "APIError "
-		}
-		return e.Detail
-	}
-	s := fmt.Sprintf("API error %d", e.Code)
-	if m, ok := errorCodeMaps[e.Service]; ok {
-		s += " (" + e.Service + ": " + m[e.Code] + ")"
-	} else {
-		// Shouldn't happen, but provide a bit more detail if it does.
-		s = e.Service + " " + s
-	}
-	if e.Detail != "" {
-		s += ": " + e.Detail
-	}
-	return s
-}
-
-func (e *APIError) IsTimeout() bool {
-	return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
-}
-
-// CallError is the type returned by appengine.Context's Call method when an
-// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
-type CallError struct {
-	Detail string
-	Code   int32
-	// TODO: Remove this if we get a distinguishable error code.
-	Timeout bool
-}
-
-func (e *CallError) Error() string {
-	var msg string
-	switch remotepb.RpcError_ErrorCode(e.Code) {
-	case remotepb.RpcError_UNKNOWN:
-		return e.Detail
-	case remotepb.RpcError_OVER_QUOTA:
-		msg = "Over quota"
-	case remotepb.RpcError_CAPABILITY_DISABLED:
-		msg = "Capability disabled"
-	case remotepb.RpcError_CANCELLED:
-		msg = "Canceled"
-	default:
-		msg = fmt.Sprintf("Call error %d", e.Code)
-	}
-	s := msg + ": " + e.Detail
-	if e.Timeout {
-		s += " (timeout)"
-	}
-	return s
-}
-
-func (e *CallError) IsTimeout() bool {
-	return e.Timeout
-}
-
-// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
-// The function should be prepared to be called on the same message more than once; it should only modify the
-// RPC request the first time.
-var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
deleted file mode 100644
index 8545ac4a..00000000
--- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
+++ /dev/null
@@ -1,1313 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/log/log_service.proto
-
-package log
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type LogServiceError_ErrorCode int32
-
-const (
-	LogServiceError_OK              LogServiceError_ErrorCode = 0
-	LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
-	LogServiceError_STORAGE_ERROR   LogServiceError_ErrorCode = 2
-)
-
-var LogServiceError_ErrorCode_name = map[int32]string{
-	0: "OK",
-	1: "INVALID_REQUEST",
-	2: "STORAGE_ERROR",
-}
-var LogServiceError_ErrorCode_value = map[string]int32{
-	"OK":              0,
-	"INVALID_REQUEST": 1,
-	"STORAGE_ERROR":   2,
-}
-
-func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
-	p := new(LogServiceError_ErrorCode)
-	*p = x
-	return p
-}
-func (x LogServiceError_ErrorCode) String() string {
-	return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
-}
-func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
-	if err != nil {
-		return err
-	}
-	*x = LogServiceError_ErrorCode(value)
-	return nil
-}
-func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
-}
-
-type LogServiceError struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LogServiceError) Reset()         { *m = LogServiceError{} }
-func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
-func (*LogServiceError) ProtoMessage()    {}
-func (*LogServiceError) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
-}
-func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
-}
-func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
-}
-func (dst *LogServiceError) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogServiceError.Merge(dst, src)
-}
-func (m *LogServiceError) XXX_Size() int {
-	return xxx_messageInfo_LogServiceError.Size(m)
-}
-func (m *LogServiceError) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
-
-type UserAppLogLine struct {
-	TimestampUsec        *int64   `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
-	Level                *int64   `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
-	Message              *string  `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *UserAppLogLine) Reset()         { *m = UserAppLogLine{} }
-func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogLine) ProtoMessage()    {}
-func (*UserAppLogLine) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
-}
-func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
-}
-func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
-}
-func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UserAppLogLine.Merge(dst, src)
-}
-func (m *UserAppLogLine) XXX_Size() int {
-	return xxx_messageInfo_UserAppLogLine.Size(m)
-}
-func (m *UserAppLogLine) XXX_DiscardUnknown() {
-	xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
-
-func (m *UserAppLogLine) GetTimestampUsec() int64 {
-	if m != nil && m.TimestampUsec != nil {
-		return *m.TimestampUsec
-	}
-	return 0
-}
-
-func (m *UserAppLogLine) GetLevel() int64 {
-	if m != nil && m.Level != nil {
-		return *m.Level
-	}
-	return 0
-}
-
-func (m *UserAppLogLine) GetMessage() string {
-	if m != nil && m.Message != nil {
-		return *m.Message
-	}
-	return ""
-}
-
-type UserAppLogGroup struct {
-	LogLine              []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *UserAppLogGroup) Reset()         { *m = UserAppLogGroup{} }
-func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogGroup) ProtoMessage()    {}
-func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
-}
-func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
-}
-func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
-}
-func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
-}
-func (m *UserAppLogGroup) XXX_Size() int {
-	return xxx_messageInfo_UserAppLogGroup.Size(m)
-}
-func (m *UserAppLogGroup) XXX_DiscardUnknown() {
-	xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
-
-func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
-	if m != nil {
-		return m.LogLine
-	}
-	return nil
-}
-
-type FlushRequest struct {
-	Logs                 []byte   `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *FlushRequest) Reset()         { *m = FlushRequest{} }
-func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
-func (*FlushRequest) ProtoMessage()    {}
-func (*FlushRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
-}
-func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
-}
-func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
-}
-func (dst *FlushRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FlushRequest.Merge(dst, src)
-}
-func (m *FlushRequest) XXX_Size() int {
-	return xxx_messageInfo_FlushRequest.Size(m)
-}
-func (m *FlushRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_FlushRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
-
-func (m *FlushRequest) GetLogs() []byte {
-	if m != nil {
-		return m.Logs
-	}
-	return nil
-}
-
-type SetStatusRequest struct {
-	Status               *string  `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *SetStatusRequest) Reset()         { *m = SetStatusRequest{} }
-func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*SetStatusRequest) ProtoMessage()    {}
-func (*SetStatusRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
-}
-func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
-}
-func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
-}
-func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SetStatusRequest.Merge(dst, src)
-}
-func (m *SetStatusRequest) XXX_Size() int {
-	return xxx_messageInfo_SetStatusRequest.Size(m)
-}
-func (m *SetStatusRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
-
-func (m *SetStatusRequest) GetStatus() string {
-	if m != nil && m.Status != nil {
-		return *m.Status
-	}
-	return ""
-}
-
-type LogOffset struct {
-	RequestId            []byte   `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LogOffset) Reset()         { *m = LogOffset{} }
-func (m *LogOffset) String() string { return proto.CompactTextString(m) }
-func (*LogOffset) ProtoMessage()    {}
-func (*LogOffset) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
-}
-func (m *LogOffset) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogOffset.Unmarshal(m, b)
-}
-func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
-}
-func (dst *LogOffset) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogOffset.Merge(dst, src)
-}
-func (m *LogOffset) XXX_Size() int {
-	return xxx_messageInfo_LogOffset.Size(m)
-}
-func (m *LogOffset) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogOffset.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogOffset proto.InternalMessageInfo
-
-func (m *LogOffset) GetRequestId() []byte {
-	if m != nil {
-		return m.RequestId
-	}
-	return nil
-}
-
-type LogLine struct {
-	Time                 *int64   `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
-	Level                *int32   `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
-	LogMessage           *string  `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LogLine) Reset()         { *m = LogLine{} }
-func (m *LogLine) String() string { return proto.CompactTextString(m) }
-func (*LogLine) ProtoMessage()    {}
-func (*LogLine) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
-}
-func (m *LogLine) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogLine.Unmarshal(m, b)
-}
-func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
-}
-func (dst *LogLine) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogLine.Merge(dst, src)
-}
-func (m *LogLine) XXX_Size() int {
-	return xxx_messageInfo_LogLine.Size(m)
-}
-func (m *LogLine) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogLine.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogLine proto.InternalMessageInfo
-
-func (m *LogLine) GetTime() int64 {
-	if m != nil && m.Time != nil {
-		return *m.Time
-	}
-	return 0
-}
-
-func (m *LogLine) GetLevel() int32 {
-	if m != nil && m.Level != nil {
-		return *m.Level
-	}
-	return 0
-}
-
-func (m *LogLine) GetLogMessage() string {
-	if m != nil && m.LogMessage != nil {
-		return *m.LogMessage
-	}
-	return ""
-}
-
-type RequestLog struct {
-	AppId                   *string    `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
-	ModuleId                *string    `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
-	VersionId               *string    `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"`
-	RequestId               []byte     `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"`
-	Offset                  *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
-	Ip                      *string    `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
-	Nickname                *string    `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
-	StartTime               *int64     `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"`
-	EndTime                 *int64     `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"`
-	Latency                 *int64     `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
-	Mcycles                 *int64     `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
-	Method                  *string    `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
-	Resource                *string    `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
-	HttpVersion             *string    `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"`
-	Status                  *int32     `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
-	ResponseSize            *int64     `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"`
-	Referrer                *string    `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
-	UserAgent               *string    `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
-	UrlMapEntry             *string    `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"`
-	Combined                *string    `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
-	ApiMcycles              *int64     `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"`
-	Host                    *string    `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
-	Cost                    *float64   `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
-	TaskQueueName           *string    `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"`
-	TaskName                *string    `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"`
-	WasLoadingRequest       *bool      `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"`
-	PendingTime             *int64     `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"`
-	ReplicaIndex            *int32     `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"`
-	Finished                *bool      `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
-	CloneKey                []byte     `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"`
-	Line                    []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
-	LinesIncomplete         *bool      `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"`
-	AppEngineRelease        []byte     `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"`
-	ExitReason              *int32     `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"`
-	WasThrottledForTime     *bool      `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"`
-	WasThrottledForRequests *bool      `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
-	ThrottledTime           *int64     `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
-	ServerName              []byte     `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
-	XXX_NoUnkeyedLiteral    struct{}   `json:"-"`
-	XXX_unrecognized        []byte     `json:"-"`
-	XXX_sizecache           int32      `json:"-"`
-}
-
-func (m *RequestLog) Reset()         { *m = RequestLog{} }
-func (m *RequestLog) String() string { return proto.CompactTextString(m) }
-func (*RequestLog) ProtoMessage()    {}
-func (*RequestLog) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
-}
-func (m *RequestLog) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_RequestLog.Unmarshal(m, b)
-}
-func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
-}
-func (dst *RequestLog) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RequestLog.Merge(dst, src)
-}
-func (m *RequestLog) XXX_Size() int {
-	return xxx_messageInfo_RequestLog.Size(m)
-}
-func (m *RequestLog) XXX_DiscardUnknown() {
-	xxx_messageInfo_RequestLog.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestLog proto.InternalMessageInfo
-
-const Default_RequestLog_ModuleId string = "default"
-const Default_RequestLog_ReplicaIndex int32 = -1
-const Default_RequestLog_Finished bool = true
-
-func (m *RequestLog) GetAppId() string {
-	if m != nil && m.AppId != nil {
-		return *m.AppId
-	}
-	return ""
-}
-
-func (m *RequestLog) GetModuleId() string {
-	if m != nil && m.ModuleId != nil {
-		return *m.ModuleId
-	}
-	return Default_RequestLog_ModuleId
-}
-
-func (m *RequestLog) GetVersionId() string {
-	if m != nil && m.VersionId != nil {
-		return *m.VersionId
-	}
-	return ""
-}
-
-func (m *RequestLog) GetRequestId() []byte {
-	if m != nil {
-		return m.RequestId
-	}
-	return nil
-}
-
-func (m *RequestLog) GetOffset() *LogOffset {
-	if m != nil {
-		return m.Offset
-	}
-	return nil
-}
-
-func (m *RequestLog) GetIp() string {
-	if m != nil && m.Ip != nil {
-		return *m.Ip
-	}
-	return ""
-}
-
-func (m *RequestLog) GetNickname() string {
-	if m != nil && m.Nickname != nil {
-		return *m.Nickname
-	}
-	return ""
-}
-
-func (m *RequestLog) GetStartTime() int64 {
-	if m != nil && m.StartTime != nil {
-		return *m.StartTime
-	}
-	return 0
-}
-
-func (m *RequestLog) GetEndTime() int64 {
-	if m != nil && m.EndTime != nil {
-		return *m.EndTime
-	}
-	return 0
-}
-
-func (m *RequestLog) GetLatency() int64 {
-	if m != nil && m.Latency != nil {
-		return *m.Latency
-	}
-	return 0
-}
-
-func (m *RequestLog) GetMcycles() int64 {
-	if m != nil && m.Mcycles != nil {
-		return *m.Mcycles
-	}
-	return 0
-}
-
-func (m *RequestLog) GetMethod() string {
-	if m != nil && m.Method != nil {
-		return *m.Method
-	}
-	return ""
-}
-
-func (m *RequestLog) GetResource() string {
-	if m != nil && m.Resource != nil {
-		return *m.Resource
-	}
-	return ""
-}
-
-func (m *RequestLog) GetHttpVersion() string {
-	if m != nil && m.HttpVersion != nil {
-		return *m.HttpVersion
-	}
-	return ""
-}
-
-func (m *RequestLog) GetStatus() int32 {
-	if m != nil && m.Status != nil {
-		return *m.Status
-	}
-	return 0
-}
-
-func (m *RequestLog) GetResponseSize() int64 {
-	if m != nil && m.ResponseSize != nil {
-		return *m.ResponseSize
-	}
-	return 0
-}
-
-func (m *RequestLog) GetReferrer() string {
-	if m != nil && m.Referrer != nil {
-		return *m.Referrer
-	}
-	return ""
-}
-
-func (m *RequestLog) GetUserAgent() string {
-	if m != nil && m.UserAgent != nil {
-		return *m.UserAgent
-	}
-	return ""
-}
-
-func (m *RequestLog) GetUrlMapEntry() string {
-	if m != nil && m.UrlMapEntry != nil {
-		return *m.UrlMapEntry
-	}
-	return ""
-}
-
-func (m *RequestLog) GetCombined() string {
-	if m != nil && m.Combined != nil {
-		return *m.Combined
-	}
-	return ""
-}
-
-func (m *RequestLog) GetApiMcycles() int64 {
-	if m != nil && m.ApiMcycles != nil {
-		return *m.ApiMcycles
-	}
-	return 0
-}
-
-func (m *RequestLog) GetHost() string {
-	if m != nil && m.Host != nil {
-		return *m.Host
-	}
-	return ""
-}
-
-func (m *RequestLog) GetCost() float64 {
-	if m != nil && m.Cost != nil {
-		return *m.Cost
-	}
-	return 0
-}
-
-func (m *RequestLog) GetTaskQueueName() string {
-	if m != nil && m.TaskQueueName != nil {
-		return *m.TaskQueueName
-	}
-	return ""
-}
-
-func (m *RequestLog) GetTaskName() string {
-	if m != nil && m.TaskName != nil {
-		return *m.TaskName
-	}
-	return ""
-}
-
-func (m *RequestLog) GetWasLoadingRequest() bool {
-	if m != nil && m.WasLoadingRequest != nil {
-		return *m.WasLoadingRequest
-	}
-	return false
-}
-
-func (m *RequestLog) GetPendingTime() int64 {
-	if m != nil && m.PendingTime != nil {
-		return *m.PendingTime
-	}
-	return 0
-}
-
-func (m *RequestLog) GetReplicaIndex() int32 {
-	if m != nil && m.ReplicaIndex != nil {
-		return *m.ReplicaIndex
-	}
-	return Default_RequestLog_ReplicaIndex
-}
-
-func (m *RequestLog) GetFinished() bool {
-	if m != nil && m.Finished != nil {
-		return *m.Finished
-	}
-	return Default_RequestLog_Finished
-}
-
-func (m *RequestLog) GetCloneKey() []byte {
-	if m != nil {
-		return m.CloneKey
-	}
-	return nil
-}
-
-func (m *RequestLog) GetLine() []*LogLine {
-	if m != nil {
-		return m.Line
-	}
-	return nil
-}
-
-func (m *RequestLog) GetLinesIncomplete() bool {
-	if m != nil && m.LinesIncomplete != nil {
-		return *m.LinesIncomplete
-	}
-	return false
-}
-
-func (m *RequestLog) GetAppEngineRelease() []byte {
-	if m != nil {
-		return m.AppEngineRelease
-	}
-	return nil
-}
-
-func (m *RequestLog) GetExitReason() int32 {
-	if m != nil && m.ExitReason != nil {
-		return *m.ExitReason
-	}
-	return 0
-}
-
-func (m *RequestLog) GetWasThrottledForTime() bool {
-	if m != nil && m.WasThrottledForTime != nil {
-		return *m.WasThrottledForTime
-	}
-	return false
-}
-
-func (m *RequestLog) GetWasThrottledForRequests() bool {
-	if m != nil && m.WasThrottledForRequests != nil {
-		return *m.WasThrottledForRequests
-	}
-	return false
-}
-
-func (m *RequestLog) GetThrottledTime() int64 {
-	if m != nil && m.ThrottledTime != nil {
-		return *m.ThrottledTime
-	}
-	return 0
-}
-
-func (m *RequestLog) GetServerName() []byte {
-	if m != nil {
-		return m.ServerName
-	}
-	return nil
-}
-
-type LogModuleVersion struct {
-	ModuleId             *string  `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
-	VersionId            *string  `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LogModuleVersion) Reset()         { *m = LogModuleVersion{} }
-func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
-func (*LogModuleVersion) ProtoMessage()    {}
-func (*LogModuleVersion) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
-}
-func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
-}
-func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
-}
-func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogModuleVersion.Merge(dst, src)
-}
-func (m *LogModuleVersion) XXX_Size() int {
-	return xxx_messageInfo_LogModuleVersion.Size(m)
-}
-func (m *LogModuleVersion) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
-
-const Default_LogModuleVersion_ModuleId string = "default"
-
-func (m *LogModuleVersion) GetModuleId() string {
-	if m != nil && m.ModuleId != nil {
-		return *m.ModuleId
-	}
-	return Default_LogModuleVersion_ModuleId
-}
-
-func (m *LogModuleVersion) GetVersionId() string {
-	if m != nil && m.VersionId != nil {
-		return *m.VersionId
-	}
-	return ""
-}
-
-type LogReadRequest struct {
-	AppId                *string             `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
-	VersionId            []string            `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
-	ModuleVersion        []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
-	StartTime            *int64              `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
-	EndTime              *int64              `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
-	Offset               *LogOffset          `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
-	RequestId            [][]byte            `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
-	MinimumLogLevel      *int32              `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
-	IncludeIncomplete    *bool               `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
-	Count                *int64              `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
-	CombinedLogRegex     *string             `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
-	HostRegex            *string             `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
-	ReplicaIndex         *int32              `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
-	IncludeAppLogs       *bool               `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
-	AppLogsPerRequest    *int32              `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
-	IncludeHost          *bool               `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
-	IncludeAll           *bool               `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
-	CacheIterator        *bool               `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
-	NumShards            *int32              `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
-	XXX_unrecognized     []byte              `json:"-"`
-	XXX_sizecache        int32               `json:"-"`
-}
-
-func (m *LogReadRequest) Reset()         { *m = LogReadRequest{} }
-func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
-func (*LogReadRequest) ProtoMessage()    {}
-func (*LogReadRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
-}
-func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
-}
-func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
-}
-func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogReadRequest.Merge(dst, src)
-}
-func (m *LogReadRequest) XXX_Size() int {
-	return xxx_messageInfo_LogReadRequest.Size(m)
-}
-func (m *LogReadRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
-
-func (m *LogReadRequest) GetAppId() string {
-	if m != nil && m.AppId != nil {
-		return *m.AppId
-	}
-	return ""
-}
-
-func (m *LogReadRequest) GetVersionId() []string {
-	if m != nil {
-		return m.VersionId
-	}
-	return nil
-}
-
-func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
-	if m != nil {
-		return m.ModuleVersion
-	}
-	return nil
-}
-
-func (m *LogReadRequest) GetStartTime() int64 {
-	if m != nil && m.StartTime != nil {
-		return *m.StartTime
-	}
-	return 0
-}
-
-func (m *LogReadRequest) GetEndTime() int64 {
-	if m != nil && m.EndTime != nil {
-		return *m.EndTime
-	}
-	return 0
-}
-
-func (m *LogReadRequest) GetOffset() *LogOffset {
-	if m != nil {
-		return m.Offset
-	}
-	return nil
-}
-
-func (m *LogReadRequest) GetRequestId() [][]byte {
-	if m != nil {
-		return m.RequestId
-	}
-	return nil
-}
-
-func (m *LogReadRequest) GetMinimumLogLevel() int32 {
-	if m != nil && m.MinimumLogLevel != nil {
-		return *m.MinimumLogLevel
-	}
-	return 0
-}
-
-func (m *LogReadRequest) GetIncludeIncomplete() bool {
-	if m != nil && m.IncludeIncomplete != nil {
-		return *m.IncludeIncomplete
-	}
-	return false
-}
-
-func (m *LogReadRequest) GetCount() int64 {
-	if m != nil && m.Count != nil {
-		return *m.Count
-	}
-	return 0
-}
-
-func (m *LogReadRequest) GetCombinedLogRegex() string {
-	if m != nil && m.CombinedLogRegex != nil {
-		return *m.CombinedLogRegex
-	}
-	return ""
-}
-
-func (m *LogReadRequest) GetHostRegex() string {
-	if m != nil && m.HostRegex != nil {
-		return *m.HostRegex
-	}
-	return ""
-}
-
-func (m *LogReadRequest) GetReplicaIndex() int32 {
-	if m != nil && m.ReplicaIndex != nil {
-		return *m.ReplicaIndex
-	}
-	return 0
-}
-
-func (m *LogReadRequest) GetIncludeAppLogs() bool {
-	if m != nil && m.IncludeAppLogs != nil {
-		return *m.IncludeAppLogs
-	}
-	return false
-}
-
-func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
-	if m != nil && m.AppLogsPerRequest != nil {
-		return *m.AppLogsPerRequest
-	}
-	return 0
-}
-
-func (m *LogReadRequest) GetIncludeHost() bool {
-	if m != nil && m.IncludeHost != nil {
-		return *m.IncludeHost
-	}
-	return false
-}
-
-func (m *LogReadRequest) GetIncludeAll() bool {
-	if m != nil && m.IncludeAll != nil {
-		return *m.IncludeAll
-	}
-	return false
-}
-
-func (m *LogReadRequest) GetCacheIterator() bool {
-	if m != nil && m.CacheIterator != nil {
-		return *m.CacheIterator
-	}
-	return false
-}
-
-func (m *LogReadRequest) GetNumShards() int32 {
-	if m != nil && m.NumShards != nil {
-		return *m.NumShards
-	}
-	return 0
-}
-
-type LogReadResponse struct {
-	Log                  []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
-	Offset               *LogOffset    `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
-	LastEndTime          *int64        `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *LogReadResponse) Reset()         { *m = LogReadResponse{} }
-func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
-func (*LogReadResponse) ProtoMessage()    {}
-func (*LogReadResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
-}
-func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
-}
-func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
-}
-func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogReadResponse.Merge(dst, src)
-}
-func (m *LogReadResponse) XXX_Size() int {
-	return xxx_messageInfo_LogReadResponse.Size(m)
-}
-func (m *LogReadResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
-
-func (m *LogReadResponse) GetLog() []*RequestLog {
-	if m != nil {
-		return m.Log
-	}
-	return nil
-}
-
-func (m *LogReadResponse) GetOffset() *LogOffset {
-	if m != nil {
-		return m.Offset
-	}
-	return nil
-}
-
-func (m *LogReadResponse) GetLastEndTime() int64 {
-	if m != nil && m.LastEndTime != nil {
-		return *m.LastEndTime
-	}
-	return 0
-}
-
-type LogUsageRecord struct {
-	VersionId            *string  `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
-	StartTime            *int32   `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
-	EndTime              *int32   `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
-	Count                *int64   `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
-	TotalSize            *int64   `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
-	Records              *int32   `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LogUsageRecord) Reset()         { *m = LogUsageRecord{} }
-func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRecord) ProtoMessage()    {}
-func (*LogUsageRecord) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
-}
-func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
-}
-func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogUsageRecord.Merge(dst, src)
-}
-func (m *LogUsageRecord) XXX_Size() int {
-	return xxx_messageInfo_LogUsageRecord.Size(m)
-}
-func (m *LogUsageRecord) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
-
-func (m *LogUsageRecord) GetVersionId() string {
-	if m != nil && m.VersionId != nil {
-		return *m.VersionId
-	}
-	return ""
-}
-
-func (m *LogUsageRecord) GetStartTime() int32 {
-	if m != nil && m.StartTime != nil {
-		return *m.StartTime
-	}
-	return 0
-}
-
-func (m *LogUsageRecord) GetEndTime() int32 {
-	if m != nil && m.EndTime != nil {
-		return *m.EndTime
-	}
-	return 0
-}
-
-func (m *LogUsageRecord) GetCount() int64 {
-	if m != nil && m.Count != nil {
-		return *m.Count
-	}
-	return 0
-}
-
-func (m *LogUsageRecord) GetTotalSize() int64 {
-	if m != nil && m.TotalSize != nil {
-		return *m.TotalSize
-	}
-	return 0
-}
-
-func (m *LogUsageRecord) GetRecords() int32 {
-	if m != nil && m.Records != nil {
-		return *m.Records
-	}
-	return 0
-}
-
-type LogUsageRequest struct {
-	AppId                *string  `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
-	VersionId            []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
-	StartTime            *int32   `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
-	EndTime              *int32   `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
-	ResolutionHours      *uint32  `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
-	CombineVersions      *bool    `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
-	UsageVersion         *int32   `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
-	VersionsOnly         *bool    `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LogUsageRequest) Reset()         { *m = LogUsageRequest{} }
-func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRequest) ProtoMessage()    {}
-func (*LogUsageRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
-}
-func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
-}
-func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogUsageRequest.Merge(dst, src)
-}
-func (m *LogUsageRequest) XXX_Size() int {
-	return xxx_messageInfo_LogUsageRequest.Size(m)
-}
-func (m *LogUsageRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
-
-const Default_LogUsageRequest_ResolutionHours uint32 = 1
-
-func (m *LogUsageRequest) GetAppId() string {
-	if m != nil && m.AppId != nil {
-		return *m.AppId
-	}
-	return ""
-}
-
-func (m *LogUsageRequest) GetVersionId() []string {
-	if m != nil {
-		return m.VersionId
-	}
-	return nil
-}
-
-func (m *LogUsageRequest) GetStartTime() int32 {
-	if m != nil && m.StartTime != nil {
-		return *m.StartTime
-	}
-	return 0
-}
-
-func (m *LogUsageRequest) GetEndTime() int32 {
-	if m != nil && m.EndTime != nil {
-		return *m.EndTime
-	}
-	return 0
-}
-
-func (m *LogUsageRequest) GetResolutionHours() uint32 {
-	if m != nil && m.ResolutionHours != nil {
-		return *m.ResolutionHours
-	}
-	return Default_LogUsageRequest_ResolutionHours
-}
-
-func (m *LogUsageRequest) GetCombineVersions() bool {
-	if m != nil && m.CombineVersions != nil {
-		return *m.CombineVersions
-	}
-	return false
-}
-
-func (m *LogUsageRequest) GetUsageVersion() int32 {
-	if m != nil && m.UsageVersion != nil {
-		return *m.UsageVersion
-	}
-	return 0
-}
-
-func (m *LogUsageRequest) GetVersionsOnly() bool {
-	if m != nil && m.VersionsOnly != nil {
-		return *m.VersionsOnly
-	}
-	return false
-}
-
-type LogUsageResponse struct {
-	Usage                []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
-	Summary              *LogUsageRecord   `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *LogUsageResponse) Reset()         { *m = LogUsageResponse{} }
-func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
-func (*LogUsageResponse) ProtoMessage()    {}
-func (*LogUsageResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
-}
-func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
-}
-func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LogUsageResponse.Merge(dst, src)
-}
-func (m *LogUsageResponse) XXX_Size() int {
-	return xxx_messageInfo_LogUsageResponse.Size(m)
-}
-func (m *LogUsageResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
-
-func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
-	if m != nil {
-		return m.Usage
-	}
-	return nil
-}
-
-func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
-	if m != nil {
-		return m.Summary
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError")
-	proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine")
-	proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup")
-	proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest")
-	proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest")
-	proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset")
-	proto.RegisterType((*LogLine)(nil), "appengine.LogLine")
-	proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog")
-	proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion")
-	proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest")
-	proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse")
-	proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord")
-	proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest")
-	proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse")
-}
-
-func init() {
-	proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
-}
-
-var fileDescriptor_log_service_f054fd4b5012319d = []byte{
-	// 1553 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
-	0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
-	0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40,
-	0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72,
-	0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d,
-	0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9,
-	0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32,
-	0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5,
-	0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3,
-	0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17,
-	0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f,
-	0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3,
-	0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46,
-	0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26,
-	0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31,
-	0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5,
-	0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd,
-	0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46,
-	0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe,
-	0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc,
-	0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1,
-	0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f,
-	0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39,
-	0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35,
-	0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd,
-	0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b,
-	0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c,
-	0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0,
-	0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36,
-	0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f,
-	0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a,
-	0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed,
-	0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95,
-	0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91,
-	0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87,
-	0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1,
-	0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5,
-	0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf,
-	0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce,
-	0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66,
-	0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0,
-	0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62,
-	0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c,
-	0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba,
-	0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a,
-	0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa,
-	0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4,
-	0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1,
-	0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62,
-	0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b,
-	0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4,
-	0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90,
-	0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78,
-	0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff,
-	0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56,
-	0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5,
-	0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b,
-	0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21,
-	0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69,
-	0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98,
-	0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51,
-	0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17,
-	0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c,
-	0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f,
-	0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f,
-	0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b,
-	0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10,
-	0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54,
-	0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d,
-	0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98,
-	0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0,
-	0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0,
-	0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61,
-	0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83,
-	0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3,
-	0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75,
-	0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38,
-	0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37,
-	0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e,
-	0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d,
-	0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65,
-	0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43,
-	0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06,
-	0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2,
-	0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea,
-	0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64,
-	0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1,
-	0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf,
-	0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36,
-	0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53,
-	0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7,
-	0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56,
-	0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b,
-	0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77,
-	0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05,
-	0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd,
-	0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00,
-	0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
deleted file mode 100644
index 8981dc47..00000000
--- a/vendor/google.golang.org/appengine/internal/log/log_service.proto
+++ /dev/null
@@ -1,150 +0,0 @@
-syntax = "proto2";
-option go_package = "log";
-
-package appengine;
-
-message LogServiceError {
-  enum ErrorCode {
-    OK  = 0;
-    INVALID_REQUEST = 1;
-    STORAGE_ERROR = 2;
-  }
-}
-
-message UserAppLogLine {
-  required int64 timestamp_usec = 1;
-  required int64 level = 2;
-  required string message = 3;
-}
-
-message UserAppLogGroup {
-  repeated UserAppLogLine log_line = 2;
-}
-
-message FlushRequest {
-  optional bytes logs = 1;
-}
-
-message SetStatusRequest {
-  required string status = 1;
-}
-
-
-message LogOffset {
-  optional bytes request_id = 1;
-}
-
-message LogLine {
-  required int64 time = 1;
-  required int32 level = 2;
-  required string log_message = 3;
-}
-
-message RequestLog {
-  required string app_id = 1;
-  optional string module_id = 37 [default="default"];
-  required string version_id = 2;
-  required bytes request_id = 3;
-  optional LogOffset offset = 35;
-  required string ip = 4;
-  optional string nickname = 5;
-  required int64 start_time = 6;
-  required int64 end_time = 7;
-  required int64 latency = 8;
-  required int64 mcycles = 9;
-  required string method = 10;
-  required string resource = 11;
-  required string http_version = 12;
-  required int32 status = 13;
-  required int64 response_size = 14;
-  optional string referrer = 15;
-  optional string user_agent = 16;
-  required string url_map_entry = 17;
-  required string combined = 18;
-  optional int64 api_mcycles = 19;
-  optional string host = 20;
-  optional double cost = 21;
-
-  optional string task_queue_name = 22;
-  optional string task_name = 23;
-
-  optional bool was_loading_request = 24;
-  optional int64 pending_time = 25;
-  optional int32 replica_index = 26 [default = -1];
-  optional bool finished = 27 [default = true];
-  optional bytes clone_key = 28;
-
-  repeated LogLine line = 29;
-
-  optional bool lines_incomplete = 36;
-  optional bytes app_engine_release = 38;
-
-  optional int32 exit_reason = 30;
-  optional bool was_throttled_for_time = 31;
-  optional bool was_throttled_for_requests = 32;
-  optional int64 throttled_time = 33;
-
-  optional bytes server_name = 34;
-}
-
-message LogModuleVersion {
-  optional string module_id = 1 [default="default"];
-  optional string version_id = 2;
-}
-
-message LogReadRequest {
-  required string app_id = 1;
-  repeated string version_id = 2;
-  repeated LogModuleVersion module_version = 19;
-
-  optional int64 start_time = 3;
-  optional int64 end_time = 4;
-  optional LogOffset offset = 5;
-  repeated bytes request_id = 6;
-
-  optional int32 minimum_log_level = 7;
-  optional bool include_incomplete = 8;
-  optional int64 count = 9;
-
-  optional string combined_log_regex = 14;
-  optional string host_regex = 15;
-  optional int32 replica_index = 16;
-
-  optional bool include_app_logs = 10;
-  optional int32 app_logs_per_request = 17;
-  optional bool include_host = 11;
-  optional bool include_all = 12;
-  optional bool cache_iterator = 13;
-  optional int32 num_shards = 18;
-}
-
-message LogReadResponse {
-  repeated RequestLog log = 1;
-  optional LogOffset offset = 2;
-  optional int64 last_end_time = 3;
-}
-
-message LogUsageRecord {
-  optional string version_id = 1;
-  optional int32 start_time = 2;
-  optional int32 end_time = 3;
-  optional int64 count = 4;
-  optional int64 total_size = 5;
-  optional int32 records = 6;
-}
-
-message LogUsageRequest {
-  required string app_id = 1;
-  repeated string version_id = 2;
-  optional int32 start_time = 3;
-  optional int32 end_time = 4;
-  optional uint32 resolution_hours = 5 [default = 1];
-  optional bool combine_versions = 6;
-  optional int32 usage_version = 7;
-  optional bool versions_only = 8;
-}
-
-message LogUsageResponse {
-  repeated LogUsageRecord usage = 1;
-  optional LogUsageRecord summary = 2;
-}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
deleted file mode 100644
index afd0ae84..00000000
--- a/vendor/google.golang.org/appengine/internal/main.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-//go:build appengine
-// +build appengine
-
-package internal
-
-import (
-	"appengine_internal"
-)
-
-func Main() {
-	MainPath = ""
-	appengine_internal.Main()
-}
diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go
deleted file mode 100644
index 357dce4d..00000000
--- a/vendor/google.golang.org/appengine/internal/main_common.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package internal
-
-// MainPath stores the file path of the main package. On App Engine Standard
-// using Go version 1.9 and below, this will be unset. On App Engine Flex and
-// App Engine Standard second-gen (Go 1.11 and above), this will be the
-// filepath to package main.
-var MainPath string
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
deleted file mode 100644
index 86a8caf0..00000000
--- a/vendor/google.golang.org/appengine/internal/main_vm.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-//go:build !appengine
-// +build !appengine
-
-package internal
-
-import (
-	"io"
-	"log"
-	"net/http"
-	"net/url"
-	"os"
-	"path/filepath"
-	"runtime"
-)
-
-func Main() {
-	MainPath = filepath.Dir(findMainPath())
-	installHealthChecker(http.DefaultServeMux)
-
-	port := "8080"
-	if s := os.Getenv("PORT"); s != "" {
-		port = s
-	}
-
-	host := ""
-	if IsDevAppServer() {
-		host = "127.0.0.1"
-	}
-	if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil {
-		log.Fatalf("http.ListenAndServe: %v", err)
-	}
-}
-
-// Find the path to package main by looking at the root Caller.
-func findMainPath() string {
-	pc := make([]uintptr, 100)
-	n := runtime.Callers(2, pc)
-	frames := runtime.CallersFrames(pc[:n])
-	for {
-		frame, more := frames.Next()
-		// Tests won't have package main, instead they have testing.tRunner
-		if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
-			return frame.File
-		}
-		if !more {
-			break
-		}
-	}
-	return ""
-}
-
-func installHealthChecker(mux *http.ServeMux) {
-	// If no health check handler has been installed by this point, add a trivial one.
-	const healthPath = "/_ah/health"
-	hreq := &http.Request{
-		Method: "GET",
-		URL: &url.URL{
-			Path: healthPath,
-		},
-	}
-	if _, pat := mux.Handler(hreq); pat != healthPath {
-		mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
-			io.WriteString(w, "ok")
-		})
-	}
-}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
deleted file mode 100644
index c4ba63bb..00000000
--- a/vendor/google.golang.org/appengine/internal/metadata.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file has code for accessing metadata.
-//
-// References:
-//	https://cloud.google.com/compute/docs/metadata
-
-import (
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-)
-
-const (
-	metadataHost = "metadata"
-	metadataPath = "/computeMetadata/v1/"
-)
-
-var (
-	metadataRequestHeaders = http.Header{
-		"Metadata-Flavor": []string{"Google"},
-	}
-)
-
-// TODO(dsymonds): Do we need to support default values, like Python?
-func mustGetMetadata(key string) []byte {
-	b, err := getMetadata(key)
-	if err != nil {
-		panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
-	}
-	return b
-}
-
-func getMetadata(key string) ([]byte, error) {
-	// TODO(dsymonds): May need to use url.Parse to support keys with query args.
-	req := &http.Request{
-		Method: "GET",
-		URL: &url.URL{
-			Scheme: "http",
-			Host:   metadataHost,
-			Path:   metadataPath + key,
-		},
-		Header: metadataRequestHeaders,
-		Host:   metadataHost,
-	}
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != 200 {
-		return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
-	}
-	return ioutil.ReadAll(resp.Body)
-}
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
deleted file mode 100644
index fe429720..00000000
--- a/vendor/google.golang.org/appengine/internal/net.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements a network dialer that limits the number of concurrent connections.
-// It is only used for API calls.
-
-import (
-	"log"
-	"net"
-	"runtime"
-	"sync"
-	"time"
-)
-
-var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
-
-func limitRelease() {
-	// non-blocking
-	select {
-	case <-limitSem:
-	default:
-		// This should not normally happen.
-		log.Print("appengine: unbalanced limitSem release!")
-	}
-}
-
-func limitDial(network, addr string) (net.Conn, error) {
-	limitSem <- 1
-
-	// Dial with a timeout in case the API host is MIA.
-	// The connection should normally be very fast.
-	conn, err := net.DialTimeout(network, addr, 10*time.Second)
-	if err != nil {
-		limitRelease()
-		return nil, err
-	}
-	lc := &limitConn{Conn: conn}
-	runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
-	return lc, nil
-}
-
-type limitConn struct {
-	close sync.Once
-	net.Conn
-}
-
-func (lc *limitConn) Close() error {
-	defer lc.close.Do(func() {
-		limitRelease()
-		runtime.SetFinalizer(lc, nil)
-	})
-	return lc.Conn.Close()
-}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
deleted file mode 100644
index 2fdb546a..00000000
--- a/vendor/google.golang.org/appengine/internal/regen.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash -e
-#
-# This script rebuilds the generated code for the protocol buffers.
-# To run this you will need protoc and goprotobuf installed;
-# see https://github.com/golang/protobuf for instructions.
-
-PKG=google.golang.org/appengine
-
-function die() {
-	echo 1>&2 $*
-	exit 1
-}
-
-# Sanity check that the right tools are accessible.
-for tool in go protoc protoc-gen-go; do
-	q=$(which $tool) || die "didn't find $tool"
-	echo 1>&2 "$tool: $q"
-done
-
-echo -n 1>&2 "finding package dir... "
-pkgdir=$(go list -f '{{.Dir}}' $PKG)
-echo 1>&2 $pkgdir
-base=$(echo $pkgdir | sed "s,/$PKG\$,,")
-echo 1>&2 "base: $base"
-cd $base
-
-# Run protoc once per package.
-for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
-	echo 1>&2 "* $dir"
-	protoc --go_out=. $dir/*.proto
-done
-
-for f in $(find $PKG/internal -name '*.pb.go'); do
-  # Remove proto.RegisterEnum calls.
-  # These cause duplicate registration panics when these packages
-  # are used on classic App Engine. proto.RegisterEnum only affects
-  # parsing the text format; we don't care about that.
-  # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
-  sed -i '/proto.RegisterEnum/d' $f
-done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
deleted file mode 100644
index 8d782a38..00000000
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
-
-package remote_api
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type RpcError_ErrorCode int32
-
-const (
-	RpcError_UNKNOWN             RpcError_ErrorCode = 0
-	RpcError_CALL_NOT_FOUND      RpcError_ErrorCode = 1
-	RpcError_PARSE_ERROR         RpcError_ErrorCode = 2
-	RpcError_SECURITY_VIOLATION  RpcError_ErrorCode = 3
-	RpcError_OVER_QUOTA          RpcError_ErrorCode = 4
-	RpcError_REQUEST_TOO_LARGE   RpcError_ErrorCode = 5
-	RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
-	RpcError_FEATURE_DISABLED    RpcError_ErrorCode = 7
-	RpcError_BAD_REQUEST         RpcError_ErrorCode = 8
-	RpcError_RESPONSE_TOO_LARGE  RpcError_ErrorCode = 9
-	RpcError_CANCELLED           RpcError_ErrorCode = 10
-	RpcError_REPLAY_ERROR        RpcError_ErrorCode = 11
-	RpcError_DEADLINE_EXCEEDED   RpcError_ErrorCode = 12
-)
-
-var RpcError_ErrorCode_name = map[int32]string{
-	0:  "UNKNOWN",
-	1:  "CALL_NOT_FOUND",
-	2:  "PARSE_ERROR",
-	3:  "SECURITY_VIOLATION",
-	4:  "OVER_QUOTA",
-	5:  "REQUEST_TOO_LARGE",
-	6:  "CAPABILITY_DISABLED",
-	7:  "FEATURE_DISABLED",
-	8:  "BAD_REQUEST",
-	9:  "RESPONSE_TOO_LARGE",
-	10: "CANCELLED",
-	11: "REPLAY_ERROR",
-	12: "DEADLINE_EXCEEDED",
-}
-var RpcError_ErrorCode_value = map[string]int32{
-	"UNKNOWN":             0,
-	"CALL_NOT_FOUND":      1,
-	"PARSE_ERROR":         2,
-	"SECURITY_VIOLATION":  3,
-	"OVER_QUOTA":          4,
-	"REQUEST_TOO_LARGE":   5,
-	"CAPABILITY_DISABLED": 6,
-	"FEATURE_DISABLED":    7,
-	"BAD_REQUEST":         8,
-	"RESPONSE_TOO_LARGE":  9,
-	"CANCELLED":           10,
-	"REPLAY_ERROR":        11,
-	"DEADLINE_EXCEEDED":   12,
-}
-
-func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
-	p := new(RpcError_ErrorCode)
-	*p = x
-	return p
-}
-func (x RpcError_ErrorCode) String() string {
-	return proto.EnumName(RpcError_ErrorCode_name, int32(x))
-}
-func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
-	if err != nil {
-		return err
-	}
-	*x = RpcError_ErrorCode(value)
-	return nil
-}
-func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
-}
-
-type Request struct {
-	ServiceName          *string  `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
-	Method               *string  `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
-	Request              []byte   `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
-	RequestId            *string  `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Request) Reset()         { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage()    {}
-func (*Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Request.Unmarshal(m, b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Request.Marshal(b, m, deterministic)
-}
-func (dst *Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Request.Merge(dst, src)
-}
-func (m *Request) XXX_Size() int {
-	return xxx_messageInfo_Request.Size(m)
-}
-func (m *Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-func (m *Request) GetServiceName() string {
-	if m != nil && m.ServiceName != nil {
-		return *m.ServiceName
-	}
-	return ""
-}
-
-func (m *Request) GetMethod() string {
-	if m != nil && m.Method != nil {
-		return *m.Method
-	}
-	return ""
-}
-
-func (m *Request) GetRequest() []byte {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-func (m *Request) GetRequestId() string {
-	if m != nil && m.RequestId != nil {
-		return *m.RequestId
-	}
-	return ""
-}
-
-type ApplicationError struct {
-	Code                 *int32   `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
-	Detail               *string  `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *ApplicationError) Reset()         { *m = ApplicationError{} }
-func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
-func (*ApplicationError) ProtoMessage()    {}
-func (*ApplicationError) Descriptor() ([]byte, []int) {
-	return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
-}
-func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
-}
-func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
-}
-func (dst *ApplicationError) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ApplicationError.Merge(dst, src)
-}
-func (m *ApplicationError) XXX_Size() int {
-	return xxx_messageInfo_ApplicationError.Size(m)
-}
-func (m *ApplicationError) XXX_DiscardUnknown() {
-	xxx_messageInfo_ApplicationError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
-
-func (m *ApplicationError) GetCode() int32 {
-	if m != nil && m.Code != nil {
-		return *m.Code
-	}
-	return 0
-}
-
-func (m *ApplicationError) GetDetail() string {
-	if m != nil && m.Detail != nil {
-		return *m.Detail
-	}
-	return ""
-}
-
-type RpcError struct {
-	Code                 *int32   `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
-	Detail               *string  `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *RpcError) Reset()         { *m = RpcError{} }
-func (m *RpcError) String() string { return proto.CompactTextString(m) }
-func (*RpcError) ProtoMessage()    {}
-func (*RpcError) Descriptor() ([]byte, []int) {
-	return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
-}
-func (m *RpcError) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_RpcError.Unmarshal(m, b)
-}
-func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
-}
-func (dst *RpcError) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RpcError.Merge(dst, src)
-}
-func (m *RpcError) XXX_Size() int {
-	return xxx_messageInfo_RpcError.Size(m)
-}
-func (m *RpcError) XXX_DiscardUnknown() {
-	xxx_messageInfo_RpcError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RpcError proto.InternalMessageInfo
-
-func (m *RpcError) GetCode() int32 {
-	if m != nil && m.Code != nil {
-		return *m.Code
-	}
-	return 0
-}
-
-func (m *RpcError) GetDetail() string {
-	if m != nil && m.Detail != nil {
-		return *m.Detail
-	}
-	return ""
-}
-
-type Response struct {
-	Response             []byte            `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
-	Exception            []byte            `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
-	ApplicationError     *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
-	JavaException        []byte            `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
-	RpcError             *RpcError         `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *Response) Reset()         { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage()    {}
-func (*Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
-}
-func (m *Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Response.Unmarshal(m, b)
-}
-func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Response.Marshal(b, m, deterministic)
-}
-func (dst *Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Response.Merge(dst, src)
-}
-func (m *Response) XXX_Size() int {
-	return xxx_messageInfo_Response.Size(m)
-}
-func (m *Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Response proto.InternalMessageInfo
-
-func (m *Response) GetResponse() []byte {
-	if m != nil {
-		return m.Response
-	}
-	return nil
-}
-
-func (m *Response) GetException() []byte {
-	if m != nil {
-		return m.Exception
-	}
-	return nil
-}
-
-func (m *Response) GetApplicationError() *ApplicationError {
-	if m != nil {
-		return m.ApplicationError
-	}
-	return nil
-}
-
-func (m *Response) GetJavaException() []byte {
-	if m != nil {
-		return m.JavaException
-	}
-	return nil
-}
-
-func (m *Response) GetRpcError() *RpcError {
-	if m != nil {
-		return m.RpcError
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*Request)(nil), "remote_api.Request")
-	proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
-	proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
-	proto.RegisterType((*Response)(nil), "remote_api.Response")
-}
-
-func init() {
-	proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
-}
-
-var fileDescriptor_remote_api_1978114ec33a273d = []byte{
-	// 531 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
-	0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
-	0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
-	0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
-	0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
-	0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
-	0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
-	0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
-	0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
-	0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
-	0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
-	0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
-	0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
-	0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
-	0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
-	0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
-	0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
-	0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
-	0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
-	0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
-	0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
-	0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
-	0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
-	0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
-	0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
-	0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
-	0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
-	0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
-	0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
-	0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
-	0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
-	0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
-	0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
-	0x03, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
deleted file mode 100644
index f21763a4..00000000
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-syntax = "proto2";
-option go_package = "remote_api";
-
-package remote_api;
-
-message Request {
-  required string service_name = 2;
-  required string method = 3;
-  required bytes request = 4;
-  optional string request_id = 5;
-}
-
-message ApplicationError {
-  required int32 code = 1;
-  required string detail = 2;
-}
-
-message RpcError {
-  enum ErrorCode {
-    UNKNOWN = 0;
-    CALL_NOT_FOUND = 1;
-    PARSE_ERROR = 2;
-    SECURITY_VIOLATION = 3;
-    OVER_QUOTA = 4;
-    REQUEST_TOO_LARGE = 5;
-    CAPABILITY_DISABLED = 6;
-    FEATURE_DISABLED = 7;
-    BAD_REQUEST = 8;
-    RESPONSE_TOO_LARGE = 9;
-    CANCELLED = 10;
-    REPLAY_ERROR = 11;
-    DEADLINE_EXCEEDED = 12;
-  }
-  required int32 code = 1;
-  optional string detail = 2;
-}
-
-message Response {
-  optional bytes response = 1;
-  optional bytes exception = 2;
-  optional ApplicationError application_error = 3;
-  optional bytes java_exception = 4;
-  optional RpcError rpc_error = 5;
-}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
deleted file mode 100644
index 2ae8ab9f..00000000
--- a/vendor/google.golang.org/appengine/internal/transaction.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements hooks for applying datastore transactions.
-
-import (
-	"context"
-	"errors"
-	"reflect"
-
-	"github.com/golang/protobuf/proto"
-
-	basepb "google.golang.org/appengine/internal/base"
-	pb "google.golang.org/appengine/internal/datastore"
-)
-
-var transactionSetters = make(map[reflect.Type]reflect.Value)
-
-// RegisterTransactionSetter registers a function that sets transaction information
-// in a protocol buffer message. f should be a function with two arguments,
-// the first being a protocol buffer type, and the second being *datastore.Transaction.
-func RegisterTransactionSetter(f interface{}) {
-	v := reflect.ValueOf(f)
-	transactionSetters[v.Type().In(0)] = v
-}
-
-// applyTransaction applies the transaction t to message pb
-// by using the relevant setter passed to RegisterTransactionSetter.
-func applyTransaction(pb proto.Message, t *pb.Transaction) {
-	v := reflect.ValueOf(pb)
-	if f, ok := transactionSetters[v.Type()]; ok {
-		f.Call([]reflect.Value{v, reflect.ValueOf(t)})
-	}
-}
-
-var transactionKey = "used for *Transaction"
-
-func transactionFromContext(ctx context.Context) *transaction {
-	t, _ := ctx.Value(&transactionKey).(*transaction)
-	return t
-}
-
-func withTransaction(ctx context.Context, t *transaction) context.Context {
-	return context.WithValue(ctx, &transactionKey, t)
-}
-
-type transaction struct {
-	transaction pb.Transaction
-	finished    bool
-}
-
-var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
-
-func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
-	if transactionFromContext(c) != nil {
-		return nil, errors.New("nested transactions are not supported")
-	}
-
-	// Begin the transaction.
-	t := &transaction{}
-	req := &pb.BeginTransactionRequest{
-		App: proto.String(FullyQualifiedAppID(c)),
-	}
-	if xg {
-		req.AllowMultipleEg = proto.Bool(true)
-	}
-	if previousTransaction != nil {
-		req.PreviousTransaction = previousTransaction
-	}
-	if readOnly {
-		req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
-	} else {
-		req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
-	}
-	if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
-		return nil, err
-	}
-
-	// Call f, rolling back the transaction if f returns a non-nil error, or panics.
-	// The panic is not recovered.
-	defer func() {
-		if t.finished {
-			return
-		}
-		t.finished = true
-		// Ignore the error return value, since we are already returning a non-nil
-		// error (or we're panicking).
-		Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
-	}()
-	if err := f(withTransaction(c, t)); err != nil {
-		return &t.transaction, err
-	}
-	t.finished = true
-
-	// Commit the transaction.
-	res := &pb.CommitResponse{}
-	err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
-	if ae, ok := err.(*APIError); ok {
-		/* TODO: restore this conditional
-		if appengine.IsDevAppServer() {
-		*/
-		// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
-		// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
-		if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
-			return &t.transaction, ErrConcurrentTransaction
-		}
-		if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
-			return &t.transaction, ErrConcurrentTransaction
-		}
-	}
-	return &t.transaction, err
-}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
deleted file mode 100644
index 5f727750..00000000
--- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
+++ /dev/null
@@ -1,527 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
-
-package urlfetch
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type URLFetchServiceError_ErrorCode int32
-
-const (
-	URLFetchServiceError_OK                       URLFetchServiceError_ErrorCode = 0
-	URLFetchServiceError_INVALID_URL              URLFetchServiceError_ErrorCode = 1
-	URLFetchServiceError_FETCH_ERROR              URLFetchServiceError_ErrorCode = 2
-	URLFetchServiceError_UNSPECIFIED_ERROR        URLFetchServiceError_ErrorCode = 3
-	URLFetchServiceError_RESPONSE_TOO_LARGE       URLFetchServiceError_ErrorCode = 4
-	URLFetchServiceError_DEADLINE_EXCEEDED        URLFetchServiceError_ErrorCode = 5
-	URLFetchServiceError_SSL_CERTIFICATE_ERROR    URLFetchServiceError_ErrorCode = 6
-	URLFetchServiceError_DNS_ERROR                URLFetchServiceError_ErrorCode = 7
-	URLFetchServiceError_CLOSED                   URLFetchServiceError_ErrorCode = 8
-	URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
-	URLFetchServiceError_TOO_MANY_REDIRECTS       URLFetchServiceError_ErrorCode = 10
-	URLFetchServiceError_MALFORMED_REPLY          URLFetchServiceError_ErrorCode = 11
-	URLFetchServiceError_CONNECTION_ERROR         URLFetchServiceError_ErrorCode = 12
-)
-
-var URLFetchServiceError_ErrorCode_name = map[int32]string{
-	0:  "OK",
-	1:  "INVALID_URL",
-	2:  "FETCH_ERROR",
-	3:  "UNSPECIFIED_ERROR",
-	4:  "RESPONSE_TOO_LARGE",
-	5:  "DEADLINE_EXCEEDED",
-	6:  "SSL_CERTIFICATE_ERROR",
-	7:  "DNS_ERROR",
-	8:  "CLOSED",
-	9:  "INTERNAL_TRANSIENT_ERROR",
-	10: "TOO_MANY_REDIRECTS",
-	11: "MALFORMED_REPLY",
-	12: "CONNECTION_ERROR",
-}
-var URLFetchServiceError_ErrorCode_value = map[string]int32{
-	"OK":                       0,
-	"INVALID_URL":              1,
-	"FETCH_ERROR":              2,
-	"UNSPECIFIED_ERROR":        3,
-	"RESPONSE_TOO_LARGE":       4,
-	"DEADLINE_EXCEEDED":        5,
-	"SSL_CERTIFICATE_ERROR":    6,
-	"DNS_ERROR":                7,
-	"CLOSED":                   8,
-	"INTERNAL_TRANSIENT_ERROR": 9,
-	"TOO_MANY_REDIRECTS":       10,
-	"MALFORMED_REPLY":          11,
-	"CONNECTION_ERROR":         12,
-}
-
-func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
-	p := new(URLFetchServiceError_ErrorCode)
-	*p = x
-	return p
-}
-func (x URLFetchServiceError_ErrorCode) String() string {
-	return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
-}
-func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
-	if err != nil {
-		return err
-	}
-	*x = URLFetchServiceError_ErrorCode(value)
-	return nil
-}
-func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
-}
-
-type URLFetchRequest_RequestMethod int32
-
-const (
-	URLFetchRequest_GET    URLFetchRequest_RequestMethod = 1
-	URLFetchRequest_POST   URLFetchRequest_RequestMethod = 2
-	URLFetchRequest_HEAD   URLFetchRequest_RequestMethod = 3
-	URLFetchRequest_PUT    URLFetchRequest_RequestMethod = 4
-	URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
-	URLFetchRequest_PATCH  URLFetchRequest_RequestMethod = 6
-)
-
-var URLFetchRequest_RequestMethod_name = map[int32]string{
-	1: "GET",
-	2: "POST",
-	3: "HEAD",
-	4: "PUT",
-	5: "DELETE",
-	6: "PATCH",
-}
-var URLFetchRequest_RequestMethod_value = map[string]int32{
-	"GET":    1,
-	"POST":   2,
-	"HEAD":   3,
-	"PUT":    4,
-	"DELETE": 5,
-	"PATCH":  6,
-}
-
-func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
-	p := new(URLFetchRequest_RequestMethod)
-	*p = x
-	return p
-}
-func (x URLFetchRequest_RequestMethod) String() string {
-	return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
-}
-func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
-	if err != nil {
-		return err
-	}
-	*x = URLFetchRequest_RequestMethod(value)
-	return nil
-}
-func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
-}
-
-type URLFetchServiceError struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *URLFetchServiceError) Reset()         { *m = URLFetchServiceError{} }
-func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
-func (*URLFetchServiceError) ProtoMessage()    {}
-func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
-	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
-}
-func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
-}
-func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
-}
-func (m *URLFetchServiceError) XXX_Size() int {
-	return xxx_messageInfo_URLFetchServiceError.Size(m)
-}
-func (m *URLFetchServiceError) XXX_DiscardUnknown() {
-	xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
-
-type URLFetchRequest struct {
-	Method                        *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
-	Url                           *string                        `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
-	Header                        []*URLFetchRequest_Header      `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
-	Payload                       []byte                         `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
-	FollowRedirects               *bool                          `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
-	Deadline                      *float64                       `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
-	MustValidateServerCertificate *bool                          `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
-	XXX_NoUnkeyedLiteral          struct{}                       `json:"-"`
-	XXX_unrecognized              []byte                         `json:"-"`
-	XXX_sizecache                 int32                          `json:"-"`
-}
-
-func (m *URLFetchRequest) Reset()         { *m = URLFetchRequest{} }
-func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
-func (*URLFetchRequest) ProtoMessage()    {}
-func (*URLFetchRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
-}
-func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
-}
-func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_URLFetchRequest.Merge(dst, src)
-}
-func (m *URLFetchRequest) XXX_Size() int {
-	return xxx_messageInfo_URLFetchRequest.Size(m)
-}
-func (m *URLFetchRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
-
-const Default_URLFetchRequest_FollowRedirects bool = true
-const Default_URLFetchRequest_MustValidateServerCertificate bool = true
-
-func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
-	if m != nil && m.Method != nil {
-		return *m.Method
-	}
-	return URLFetchRequest_GET
-}
-
-func (m *URLFetchRequest) GetUrl() string {
-	if m != nil && m.Url != nil {
-		return *m.Url
-	}
-	return ""
-}
-
-func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *URLFetchRequest) GetPayload() []byte {
-	if m != nil {
-		return m.Payload
-	}
-	return nil
-}
-
-func (m *URLFetchRequest) GetFollowRedirects() bool {
-	if m != nil && m.FollowRedirects != nil {
-		return *m.FollowRedirects
-	}
-	return Default_URLFetchRequest_FollowRedirects
-}
-
-func (m *URLFetchRequest) GetDeadline() float64 {
-	if m != nil && m.Deadline != nil {
-		return *m.Deadline
-	}
-	return 0
-}
-
-func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
-	if m != nil && m.MustValidateServerCertificate != nil {
-		return *m.MustValidateServerCertificate
-	}
-	return Default_URLFetchRequest_MustValidateServerCertificate
-}
-
-type URLFetchRequest_Header struct {
-	Key                  *string  `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
-	Value                *string  `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *URLFetchRequest_Header) Reset()         { *m = URLFetchRequest_Header{} }
-func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
-func (*URLFetchRequest_Header) ProtoMessage()    {}
-func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
-	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
-}
-func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
-}
-func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
-}
-func (m *URLFetchRequest_Header) XXX_Size() int {
-	return xxx_messageInfo_URLFetchRequest_Header.Size(m)
-}
-func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
-	xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
-
-func (m *URLFetchRequest_Header) GetKey() string {
-	if m != nil && m.Key != nil {
-		return *m.Key
-	}
-	return ""
-}
-
-func (m *URLFetchRequest_Header) GetValue() string {
-	if m != nil && m.Value != nil {
-		return *m.Value
-	}
-	return ""
-}
-
-type URLFetchResponse struct {
-	Content               []byte                     `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
-	StatusCode            *int32                     `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
-	Header                []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
-	ContentWasTruncated   *bool                      `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
-	ExternalBytesSent     *int64                     `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
-	ExternalBytesReceived *int64                     `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
-	FinalUrl              *string                    `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
-	ApiCpuMilliseconds    *int64                     `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
-	ApiBytesSent          *int64                     `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
-	ApiBytesReceived      *int64                     `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
-	XXX_NoUnkeyedLiteral  struct{}                   `json:"-"`
-	XXX_unrecognized      []byte                     `json:"-"`
-	XXX_sizecache         int32                      `json:"-"`
-}
-
-func (m *URLFetchResponse) Reset()         { *m = URLFetchResponse{} }
-func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
-func (*URLFetchResponse) ProtoMessage()    {}
-func (*URLFetchResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
-}
-func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
-}
-func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_URLFetchResponse.Merge(dst, src)
-}
-func (m *URLFetchResponse) XXX_Size() int {
-	return xxx_messageInfo_URLFetchResponse.Size(m)
-}
-func (m *URLFetchResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
-
-const Default_URLFetchResponse_ContentWasTruncated bool = false
-const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
-const Default_URLFetchResponse_ApiBytesSent int64 = 0
-const Default_URLFetchResponse_ApiBytesReceived int64 = 0
-
-func (m *URLFetchResponse) GetContent() []byte {
-	if m != nil {
-		return m.Content
-	}
-	return nil
-}
-
-func (m *URLFetchResponse) GetStatusCode() int32 {
-	if m != nil && m.StatusCode != nil {
-		return *m.StatusCode
-	}
-	return 0
-}
-
-func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *URLFetchResponse) GetContentWasTruncated() bool {
-	if m != nil && m.ContentWasTruncated != nil {
-		return *m.ContentWasTruncated
-	}
-	return Default_URLFetchResponse_ContentWasTruncated
-}
-
-func (m *URLFetchResponse) GetExternalBytesSent() int64 {
-	if m != nil && m.ExternalBytesSent != nil {
-		return *m.ExternalBytesSent
-	}
-	return 0
-}
-
-func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
-	if m != nil && m.ExternalBytesReceived != nil {
-		return *m.ExternalBytesReceived
-	}
-	return 0
-}
-
-func (m *URLFetchResponse) GetFinalUrl() string {
-	if m != nil && m.FinalUrl != nil {
-		return *m.FinalUrl
-	}
-	return ""
-}
-
-func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
-	if m != nil && m.ApiCpuMilliseconds != nil {
-		return *m.ApiCpuMilliseconds
-	}
-	return Default_URLFetchResponse_ApiCpuMilliseconds
-}
-
-func (m *URLFetchResponse) GetApiBytesSent() int64 {
-	if m != nil && m.ApiBytesSent != nil {
-		return *m.ApiBytesSent
-	}
-	return Default_URLFetchResponse_ApiBytesSent
-}
-
-func (m *URLFetchResponse) GetApiBytesReceived() int64 {
-	if m != nil && m.ApiBytesReceived != nil {
-		return *m.ApiBytesReceived
-	}
-	return Default_URLFetchResponse_ApiBytesReceived
-}
-
-type URLFetchResponse_Header struct {
-	Key                  *string  `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
-	Value                *string  `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *URLFetchResponse_Header) Reset()         { *m = URLFetchResponse_Header{} }
-func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
-func (*URLFetchResponse_Header) ProtoMessage()    {}
-func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
-	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
-}
-func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
-}
-func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
-}
-func (m *URLFetchResponse_Header) XXX_Size() int {
-	return xxx_messageInfo_URLFetchResponse_Header.Size(m)
-}
-func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
-	xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
-
-func (m *URLFetchResponse_Header) GetKey() string {
-	if m != nil && m.Key != nil {
-		return *m.Key
-	}
-	return ""
-}
-
-func (m *URLFetchResponse_Header) GetValue() string {
-	if m != nil && m.Value != nil {
-		return *m.Value
-	}
-	return ""
-}
-
-func init() {
-	proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
-	proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
-	proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
-	proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
-	proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
-}
-
-func init() {
-	proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
-}
-
-var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
-	// 770 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
-	0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
-	0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
-	0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
-	0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
-	0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
-	0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
-	0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
-	0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
-	0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
-	0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
-	0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
-	0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
-	0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
-	0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
-	0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
-	0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
-	0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
-	0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
-	0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
-	0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
-	0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
-	0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
-	0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
-	0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
-	0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
-	0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
-	0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
-	0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
-	0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
-	0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
-	0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
-	0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
-	0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
-	0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
-	0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
-	0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
-	0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
-	0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
-	0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
-	0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
-	0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
-	0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
-	0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
-	0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
-	0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
-	0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
-	0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
-	0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
deleted file mode 100644
index f695edf6..00000000
--- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+++ /dev/null
@@ -1,64 +0,0 @@
-syntax = "proto2";
-option go_package = "urlfetch";
-
-package appengine;
-
-message URLFetchServiceError {
-  enum ErrorCode {
-    OK = 0;
-    INVALID_URL = 1;
-    FETCH_ERROR = 2;
-    UNSPECIFIED_ERROR = 3;
-    RESPONSE_TOO_LARGE = 4;
-    DEADLINE_EXCEEDED = 5;
-    SSL_CERTIFICATE_ERROR = 6;
-    DNS_ERROR = 7;
-    CLOSED = 8;
-    INTERNAL_TRANSIENT_ERROR = 9;
-    TOO_MANY_REDIRECTS = 10;
-    MALFORMED_REPLY = 11;
-    CONNECTION_ERROR = 12;
-  }
-}
-
-message URLFetchRequest {
-  enum RequestMethod {
-    GET = 1;
-    POST = 2;
-    HEAD = 3;
-    PUT = 4;
-    DELETE = 5;
-    PATCH = 6;
-  }
-  required RequestMethod Method = 1;
-  required string Url = 2;
-  repeated group Header = 3 {
-    required string Key = 4;
-    required string Value = 5;
-  }
-  optional bytes Payload = 6 [ctype=CORD];
-
-  optional bool FollowRedirects = 7 [default=true];
-
-  optional double Deadline = 8;
-
-  optional bool MustValidateServerCertificate = 9 [default=true];
-}
-
-message URLFetchResponse {
-  optional bytes Content = 1;
-  required int32 StatusCode = 2;
-  repeated group Header = 3 {
-    required string Key = 4;
-    required string Value = 5;
-  }
-  optional bool ContentWasTruncated = 6 [default=false];
-  optional int64 ExternalBytesSent = 7;
-  optional int64 ExternalBytesReceived = 8;
-
-  optional string FinalUrl = 9;
-
-  optional int64 ApiCpuMilliseconds = 10 [default=0];
-  optional int64 ApiBytesSent = 11 [default=0];
-  optional int64 ApiBytesReceived = 12 [default=0];
-}
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
deleted file mode 100644
index 6c0d7241..00000000
--- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package urlfetch provides an http.RoundTripper implementation
-// for fetching URLs via App Engine's urlfetch service.
-package urlfetch // import "google.golang.org/appengine/urlfetch"
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/golang/protobuf/proto"
-
-	"google.golang.org/appengine/internal"
-	pb "google.golang.org/appengine/internal/urlfetch"
-)
-
-// Transport is an implementation of http.RoundTripper for
-// App Engine. Users should generally create an http.Client using
-// this transport and use the Client rather than using this transport
-// directly.
-type Transport struct {
-	Context context.Context
-
-	// Controls whether the application checks the validity of SSL certificates
-	// over HTTPS connections. A value of false (the default) instructs the
-	// application to send a request to the server only if the certificate is
-	// valid and signed by a trusted certificate authority (CA), and also
-	// includes a hostname that matches the certificate. A value of true
-	// instructs the application to perform no certificate validation.
-	AllowInvalidServerCertificate bool
-}
-
-// Verify statically that *Transport implements http.RoundTripper.
-var _ http.RoundTripper = (*Transport)(nil)
-
-// Client returns an *http.Client using a default urlfetch Transport. This
-// client will check the validity of SSL certificates.
-//
-// Any deadline of the provided context will be used for requests through this client.
-// If the client does not have a deadline, then an App Engine default of 60 second is used.
-func Client(ctx context.Context) *http.Client {
-	return &http.Client{
-		Transport: &Transport{
-			Context: ctx,
-		},
-	}
-}
-
-type bodyReader struct {
-	content   []byte
-	truncated bool
-	closed    bool
-}
-
-// ErrTruncatedBody is the error returned after the final Read() from a
-// response's Body if the body has been truncated by App Engine's proxy.
-var ErrTruncatedBody = errors.New("urlfetch: truncated body")
-
-func statusCodeToText(code int) string {
-	if t := http.StatusText(code); t != "" {
-		return t
-	}
-	return strconv.Itoa(code)
-}
-
-func (br *bodyReader) Read(p []byte) (n int, err error) {
-	if br.closed {
-		if br.truncated {
-			return 0, ErrTruncatedBody
-		}
-		return 0, io.EOF
-	}
-	n = copy(p, br.content)
-	if n > 0 {
-		br.content = br.content[n:]
-		return
-	}
-	if br.truncated {
-		br.closed = true
-		return 0, ErrTruncatedBody
-	}
-	return 0, io.EOF
-}
-
-func (br *bodyReader) Close() error {
-	br.closed = true
-	br.content = nil
-	return nil
-}
-
-// A map of the URL Fetch-accepted methods that take a request body.
-var methodAcceptsRequestBody = map[string]bool{
-	"POST":  true,
-	"PUT":   true,
-	"PATCH": true,
-}
-
-// urlString returns a valid string given a URL. This function is necessary because
-// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
-// See http://code.google.com/p/go/issues/detail?id=4860.
-func urlString(u *url.URL) string {
-	if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
-		return u.String()
-	}
-	aux := *u
-	aux.Opaque = "//" + aux.Host + aux.Opaque
-	return aux.String()
-}
-
-// RoundTrip issues a single HTTP request and returns its response. Per the
-// http.RoundTripper interface, RoundTrip only returns an error if there
-// was an unsupported request or the URL Fetch proxy fails.
-// Note that HTTP response codes such as 5xx, 403, 404, etc are not
-// errors as far as the transport is concerned and will be returned
-// with err set to nil.
-func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
-	methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
-	if !ok {
-		return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
-	}
-
-	method := pb.URLFetchRequest_RequestMethod(methNum)
-
-	freq := &pb.URLFetchRequest{
-		Method:                        &method,
-		Url:                           proto.String(urlString(req.URL)),
-		FollowRedirects:               proto.Bool(false), // http.Client's responsibility
-		MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
-	}
-	if deadline, ok := t.Context.Deadline(); ok {
-		freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
-	}
-
-	for k, vals := range req.Header {
-		for _, val := range vals {
-			freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
-				Key:   proto.String(k),
-				Value: proto.String(val),
-			})
-		}
-	}
-	if methodAcceptsRequestBody[req.Method] && req.Body != nil {
-		// Avoid a []byte copy if req.Body has a Bytes method.
-		switch b := req.Body.(type) {
-		case interface {
-			Bytes() []byte
-		}:
-			freq.Payload = b.Bytes()
-		default:
-			freq.Payload, err = ioutil.ReadAll(req.Body)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	fres := &pb.URLFetchResponse{}
-	if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
-		return nil, err
-	}
-
-	res = &http.Response{}
-	res.StatusCode = int(*fres.StatusCode)
-	res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
-	res.Header = make(http.Header)
-	res.Request = req
-
-	// Faked:
-	res.ProtoMajor = 1
-	res.ProtoMinor = 1
-	res.Proto = "HTTP/1.1"
-	res.Close = true
-
-	for _, h := range fres.Header {
-		hkey := http.CanonicalHeaderKey(*h.Key)
-		hval := *h.Value
-		if hkey == "Content-Length" {
-			// Will get filled in below for all but HEAD requests.
-			if req.Method == "HEAD" {
-				res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
-			}
-			continue
-		}
-		res.Header.Add(hkey, hval)
-	}
-
-	if req.Method != "HEAD" {
-		res.ContentLength = int64(len(fres.Content))
-	}
-
-	truncated := fres.GetContentWasTruncated()
-	res.Body = &bodyReader{content: fres.Content, truncated: truncated}
-	return
-}
-
-func init() {
-	internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
-	internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
-}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index f4790237..737d6876 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -102,7 +102,7 @@ type decoder struct {
 }
 
 // newError returns an error object with position info.
-func (d decoder) newError(pos int, f string, x ...interface{}) error {
+func (d decoder) newError(pos int, f string, x ...any) error {
 	line, column := d.Position(pos)
 	head := fmt.Sprintf("(line %d:%d): ", line, column)
 	return errors.New(head+f, x...)
@@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error {
 }
 
 // syntaxError returns a syntax error for given position.
-func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+func (d decoder) syntaxError(pos int, f string, x ...any) error {
 	line, column := d.Position(pos)
 	head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
 	return errors.New(head+f, x...)
@@ -192,11 +192,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro
 				fd = fieldDescs.ByTextName(name)
 			}
 		}
-		if flags.ProtoLegacy {
-			if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
-				fd = nil // reset since the weak reference is not linked in
-			}
-		}
 
 		if fd == nil {
 			// Field is unknown.
@@ -351,7 +346,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
 		panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
 	}
 
-	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
 }
 
 func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 29846df2..0e72d853 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
 		}
 
 		v := m.Get(fd)
-		isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
-		isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
-		if isProto2Scalar || isSingularMessage {
+		if fd.HasPresence() {
 			if m.skipNull {
 				continue
 			}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 4b177c82..e9fe1039 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa
 		switch tok.Kind() {
 		case json.ObjectClose:
 			if !found {
-				return d.newError(tok.Pos(), `missing "value" field`)
+				// We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
+				// for compatibility with other proto runtimes that have interpreted the spec differently.
+				if m.Descriptor().FullName() != genid.Empty_message_fullname {
+					return d.newError(tok.Pos(), `missing "value" field`)
+				}
 			}
 			return nil
 
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index a45f112b..b5380505 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -84,7 +84,7 @@ type decoder struct {
 }
 
 // newError returns an error object with position info.
-func (d decoder) newError(pos int, f string, x ...interface{}) error {
+func (d decoder) newError(pos int, f string, x ...any) error {
 	line, column := d.Position(pos)
 	head := fmt.Sprintf("(line %d:%d): ", line, column)
 	return errors.New(head+f, x...)
@@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error {
 }
 
 // syntaxError returns a syntax error for given position.
-func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+func (d decoder) syntaxError(pos int, f string, x ...any) error {
 	line, column := d.Position(pos)
 	head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
 	return errors.New(head+f, x...)
@@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro
 		} else if xtErr != nil && xtErr != protoregistry.NotFound {
 			return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
 		}
-		if flags.ProtoLegacy {
-			if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
-				fd = nil // reset since the weak reference is not linked in
-			}
-		}
 
 		// Handle unknown fields.
 		if fd == nil {
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 8401be8c..024ffebd 100644
--- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
 // dependency on the descriptor proto package).
 package descopts
 
-import pref "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // These variables are set by the init function in descriptor.pb.go via logic
 // in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
 //
 // Each variable is populated with a nil pointer to the options struct.
 var (
-	File           pref.ProtoMessage
-	Enum           pref.ProtoMessage
-	EnumValue      pref.ProtoMessage
-	Message        pref.ProtoMessage
-	Field          pref.ProtoMessage
-	Oneof          pref.ProtoMessage
-	ExtensionRange pref.ProtoMessage
-	Service        pref.ProtoMessage
-	Method         pref.ProtoMessage
+	File           protoreflect.ProtoMessage
+	Enum           protoreflect.ProtoMessage
+	EnumValue      protoreflect.ProtoMessage
+	Message        protoreflect.ProtoMessage
+	Field          protoreflect.ProtoMessage
+	Oneof          protoreflect.ProtoMessage
+	ExtensionRange protoreflect.ProtoMessage
+	Service        protoreflect.ProtoMessage
+	Method         protoreflect.ProtoMessage
 )
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index ff6a3836..5a57ef6f 100644
Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index 029a6a12..bf1aba0e 100644
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -5,9 +5,14 @@
 // Package editionssupport defines constants for editions that are supported.
 package editionssupport
 
-import descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+import "google.golang.org/protobuf/types/descriptorpb"
 
 const (
 	Minimum = descriptorpb.Edition_EDITION_PROTO2
 	Maximum = descriptorpb.Edition_EDITION_2023
+
+	// MaximumKnown is the maximum edition that is known to Go Protobuf, but not
+	// declared as supported. In other words: end users cannot use it, but
+	// testprotos inside Go Protobuf can.
+	MaximumKnown = descriptorpb.Edition_EDITION_2024
 )
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
index d2b3ac03..ea1d3e65 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
@@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) {
 
 // newSyntaxError returns an error with line and column information useful for
 // syntax errors.
-func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
+func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error {
 	e := errors.New(f, x...)
 	line, column := d.Position(pos)
 	return errors.New("syntax error (line %d:%d): %v", line, column, e)
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 7e87c760..669133d0 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0))
 // The type is the underlying field type (e.g., a repeated field may be
 // represented by []T, but the Go type passed in is just T).
 // A list of enum value descriptors must be provided for enum fields.
-// This does not populate the Enum or Message (except for weak message).
+// This does not populate the Enum or Message.
 //
 // This function is a best effort attempt; parsing errors are ignored.
 func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
@@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
 			}
 		case s == "packed":
 			f.L1.EditionFeatures.IsPacked = true
-		case strings.HasPrefix(s, "weak="):
-			f.L1.IsWeak = true
-			f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
 		case strings.HasPrefix(s, "def="):
 			// The default tag is special in that everything afterwards is the
 			// default regardless of the presence of commas.
@@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string {
 		// the exact same semantics from the previous generator.
 		tag = append(tag, "json="+jsonName)
 	}
-	if fd.IsWeak() {
-		tag = append(tag, "weak="+string(fd.Message().FullName()))
-	}
 	// The previous implementation does not tag extension fields as proto3,
 	// even when the field is defined in a proto3 file. Match that behavior
 	// for consistency.
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 87853e78..099b2bf4 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
 
 // newSyntaxError returns a syntax error with line and column information for
 // current position.
-func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
+func (d *Decoder) newSyntaxError(f string, x ...any) error {
 	e := errors.New(f, x...)
 	line, column := d.Position(len(d.orig) - len(d.in))
 	return errors.New("syntax error (line %d:%d): %v", line, column, e)
diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go
index d9671982..c2d6bd52 100644
--- a/vendor/google.golang.org/protobuf/internal/errors/errors.go
+++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go
@@ -17,7 +17,7 @@ var Error = errors.New("protobuf error")
 
 // New formats a string according to the format specifier and arguments and
 // returns an error that has a "proto" prefix.
-func New(f string, x ...interface{}) error {
+func New(f string, x ...any) error {
 	return &prefixError{s: format(f, x...)}
 }
 
@@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error {
 
 // Wrap returns an error that has a "proto" prefix, the formatted string described
 // by the format specifier and arguments, and a suffix of err. The error wraps err.
-func Wrap(err error, f string, x ...interface{}) error {
+func Wrap(err error, f string, x ...any) error {
 	return &wrapError{
 		s:   format(f, x...),
 		err: err,
@@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool {
 	return target == Error
 }
 
-func format(f string, x ...interface{}) string {
+func format(f string, x ...any) string {
 	// avoid "proto: " prefix when chaining
 	for i := 0; i < len(x); i++ {
 		switch e := x[i].(type) {
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
deleted file mode 100644
index fbcd3492..00000000
--- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.13
-// +build !go1.13
-
-package errors
-
-import "reflect"
-
-// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
-func Is(err, target error) bool {
-	if target == nil {
-		return err == target
-	}
-
-	isComparable := reflect.TypeOf(target).Comparable()
-	for {
-		if isComparable && err == target {
-			return true
-		}
-		if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
-			return true
-		}
-		if err = unwrap(err); err == nil {
-			return false
-		}
-	}
-}
-
-func unwrap(err error) error {
-	u, ok := err.(interface {
-		Unwrap() error
-	})
-	if !ok {
-		return nil
-	}
-	return u.Unwrap()
-}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
deleted file mode 100644
index 5e72f1cd..00000000
--- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-// +build go1.13
-
-package errors
-
-import "errors"
-
-// Is is errors.Is.
-func Is(err, target error) bool { return errors.Is(err, target) }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index ece53bea..688aabe4 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -19,7 +19,6 @@ import (
 	"google.golang.org/protobuf/internal/pragma"
 	"google.golang.org/protobuf/internal/strs"
 	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
 )
 
 // Edition is an Enum for proto2.Edition
@@ -32,6 +31,7 @@ const (
 	EditionProto2      Edition = 998
 	EditionProto3      Edition = 999
 	Edition2023        Edition = 1000
+	Edition2024        Edition = 1001
 	EditionUnsupported Edition = 100000
 )
 
@@ -77,31 +77,48 @@ type (
 		Locations SourceLocations
 	}
 
+	// EditionFeatures is a frequently-instantiated struct, so please take care
+	// to minimize padding when adding new fields to this struct (add them in
+	// the right place/order).
 	EditionFeatures struct {
+		// StripEnumPrefix determines if the plugin generates enum value
+		// constants as-is, with their prefix stripped, or both variants.
+		StripEnumPrefix int
+
 		// IsFieldPresence is true if field_presence is EXPLICIT
 		// https://protobuf.dev/editions/features/#field_presence
 		IsFieldPresence bool
+
 		// IsFieldPresence is true if field_presence is LEGACY_REQUIRED
 		// https://protobuf.dev/editions/features/#field_presence
 		IsLegacyRequired bool
+
 		// IsOpenEnum is true if enum_type is OPEN
 		// https://protobuf.dev/editions/features/#enum_type
 		IsOpenEnum bool
+
 		// IsPacked is true if repeated_field_encoding is PACKED
 		// https://protobuf.dev/editions/features/#repeated_field_encoding
 		IsPacked bool
+
 		// IsUTF8Validated is true if utf_validation is VERIFY
 		// https://protobuf.dev/editions/features/#utf8_validation
 		IsUTF8Validated bool
+
 		// IsDelimitedEncoded is true if message_encoding is DELIMITED
 		// https://protobuf.dev/editions/features/#message_encoding
 		IsDelimitedEncoded bool
+
 		// IsJSONCompliant is true if json_format is ALLOW
 		// https://protobuf.dev/editions/features/#json_format
 		IsJSONCompliant bool
+
 		// GenerateLegacyUnmarshalJSON determines if the plugin generates the
 		// UnmarshalJSON([]byte) error method for enums.
 		GenerateLegacyUnmarshalJSON bool
+		// APILevel controls which API (Open, Hybrid or Opaque) should be used
+		// for generated code (.pb.go files).
+		APILevel int
 	}
 )
 
@@ -257,7 +274,7 @@ type (
 		Kind             protoreflect.Kind
 		StringName       stringName
 		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
-		IsWeak           bool // promoted from google.protobuf.FieldOptions
+		IsLazy           bool // promoted from google.protobuf.FieldOptions
 		Default          defaultValue
 		ContainingOneof  protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
 		Enum             protoreflect.EnumDescriptor
@@ -350,7 +367,8 @@ func (fd *Field) IsPacked() bool {
 	return fd.L1.EditionFeatures.IsPacked
 }
 func (fd *Field) IsExtension() bool { return false }
-func (fd *Field) IsWeak() bool      { return fd.L1.IsWeak }
+func (fd *Field) IsWeak() bool      { return false }
+func (fd *Field) IsLazy() bool      { return fd.L1.IsLazy }
 func (fd *Field) IsList() bool      { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
 func (fd *Field) IsMap() bool       { return fd.Message() != nil && fd.Message().IsMapEntry() }
 func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -376,13 +394,12 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor {
 	return fd.L1.Enum
 }
 func (fd *Field) Message() protoreflect.MessageDescriptor {
-	if fd.L1.IsWeak {
-		if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
-			return d.(protoreflect.MessageDescriptor)
-		}
-	}
 	return fd.L1.Message
 }
+func (fd *Field) IsMapEntry() bool {
+	parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor)
+	return ok && parent.IsMapEntry()
+}
 func (fd *Field) Format(s fmt.State, r rune)             { descfmt.FormatDesc(s, r, fd) }
 func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
 
@@ -421,6 +438,7 @@ type (
 		Extendee        protoreflect.MessageDescriptor
 		Cardinality     protoreflect.Cardinality
 		Kind            protoreflect.Kind
+		IsLazy          bool
 		EditionFeatures EditionFeatures
 	}
 	ExtensionL2 struct {
@@ -461,6 +479,7 @@ func (xd *Extension) IsPacked() bool {
 }
 func (xd *Extension) IsExtension() bool                      { return true }
 func (xd *Extension) IsWeak() bool                           { return false }
+func (xd *Extension) IsLazy() bool                           { return xd.L1.IsLazy }
 func (xd *Extension) IsList() bool                           { return xd.Cardinality() == protoreflect.Repeated }
 func (xd *Extension) IsMap() bool                            { return false }
 func (xd *Extension) MapKey() protoreflect.FieldDescriptor   { return nil }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 3bc3b1cd..d2f54949 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) {
 			switch num {
 			case genid.FieldOptions_Packed_field_number:
 				xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+			case genid.FieldOptions_Lazy_field_number:
+				xd.L1.IsLazy = protowire.DecodeBool(v)
 			}
 		case protowire.BytesType:
 			v, m := protowire.ConsumeBytes(b)
@@ -534,7 +536,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
 }
 
 var nameBuilderPool = sync.Pool{
-	New: func() interface{} { return new(strs.Builder) },
+	New: func() any { return new(strs.Builder) },
 }
 
 func getBuilder() *strs.Builder {
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index 570181eb..d4c94458 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -32,11 +32,6 @@ func (file *File) resolveMessages() {
 		for j := range md.L2.Fields.List {
 			fd := &md.L2.Fields.List[j]
 
-			// Weak fields are resolved upon actual use.
-			if fd.L1.IsWeak {
-				continue
-			}
-
 			// Resolve message field dependency.
 			switch fd.L1.Kind {
 			case protoreflect.EnumKind:
@@ -45,6 +40,11 @@ func (file *File) resolveMessages() {
 			case protoreflect.MessageKind, protoreflect.GroupKind:
 				fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
 				depIdx++
+				if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) {
+					// A map field might inherit delimited encoding from a file-wide default feature.
+					// But maps never actually use delimited encoding. (At least for now...)
+					fd.L1.Kind = protoreflect.MessageKind
+				}
 			}
 
 			// Default is resolved here since it depends on Enum being resolved.
@@ -145,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) {
 			switch num {
 			case genid.FileDescriptorProto_PublicDependency_field_number:
 				fd.L2.Imports[v].IsPublic = true
-			case genid.FileDescriptorProto_WeakDependency_field_number:
-				fd.L2.Imports[v].IsWeak = true
 			}
 		case protowire.BytesType:
 			v, m := protowire.ConsumeBytes(b)
@@ -497,8 +495,8 @@ func (fd *Field) unmarshalOptions(b []byte) {
 			switch num {
 			case genid.FieldOptions_Packed_field_number:
 				fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
-			case genid.FieldOptions_Weak_field_number:
-				fd.L1.IsWeak = protowire.DecodeBool(v)
+			case genid.FieldOptions_Lazy_field_number:
+				fd.L1.IsLazy = protowire.DecodeBool(v)
 			case FieldOptions_EnforceUTF8:
 				fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
 			}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 11f5f356..10132c9b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -32,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeVarint(b)
 			b = b[m:]
 			parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
+		case genid.GoFeatures_ApiLevel_field_number:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			parent.APILevel = int(v)
+		case genid.GoFeatures_StripEnumPrefix_field_number:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			parent.StripEnumPrefix = int(v)
 		default:
 			panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
 		}
@@ -68,7 +76,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeBytes(b)
 			b = b[m:]
 			switch num {
-			case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+			case genid.FeatureSet_Go_ext_number:
 				parent = unmarshalGoFeature(v, parent)
 			}
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
index f0e38c4e..e1b4130b 100644
--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -63,12 +63,12 @@ type Builder struct {
 	// message declarations in "flattened ordering".
 	//
 	// Dependencies are Go types for enums or messages referenced by
-	// message fields (excluding weak fields), for parent extended messages of
+	// message fields, for parent extended messages of
 	// extension fields, for enums or messages referenced by extension fields,
 	// and for input and output messages referenced by service methods.
 	// Dependencies must come after declarations, but the ordering of
 	// dependencies themselves is unspecified.
-	GoTypes []interface{}
+	GoTypes []any
 
 	// DependencyIndexes is an ordered list of indexes into GoTypes for the
 	// dependencies of messages, extensions, or services.
@@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 {
 
 type (
 	resolverByIndex struct {
-		goTypes []interface{}
+		goTypes []any
 		depIdxs depIdxs
 		fileRegistry
 	}
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
index 58372dd3..a06ccabc 100644
--- a/vendor/google.golang.org/protobuf/internal/flags/flags.go
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -6,7 +6,7 @@
 package flags
 
 // ProtoLegacy specifies whether to enable support for legacy functionality
-// such as MessageSets, weak fields, and various other obscure behavior
+// such as MessageSets, and various other obscure behavior
 // that is necessary to maintain backwards compatibility with proto1 or
 // the pre-release variants of proto2 and proto3.
 //
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 1447a119..f30ab6b5 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -860,11 +860,13 @@ const (
 	EnumValueOptions_Deprecated_field_name          protoreflect.Name = "deprecated"
 	EnumValueOptions_Features_field_name            protoreflect.Name = "features"
 	EnumValueOptions_DebugRedact_field_name         protoreflect.Name = "debug_redact"
+	EnumValueOptions_FeatureSupport_field_name      protoreflect.Name = "feature_support"
 	EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 
 	EnumValueOptions_Deprecated_field_fullname          protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
 	EnumValueOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
 	EnumValueOptions_DebugRedact_field_fullname         protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
+	EnumValueOptions_FeatureSupport_field_fullname      protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support"
 	EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
 )
 
@@ -873,6 +875,7 @@ const (
 	EnumValueOptions_Deprecated_field_number          protoreflect.FieldNumber = 1
 	EnumValueOptions_Features_field_number            protoreflect.FieldNumber = 2
 	EnumValueOptions_DebugRedact_field_number         protoreflect.FieldNumber = 3
+	EnumValueOptions_FeatureSupport_field_number      protoreflect.FieldNumber = 4
 	EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 )
 
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
index 45ccd012..d9b9d916 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
 // and the well-known types.
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 9a652a2b..f5ee7f5c 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -12,20 +12,59 @@ import (
 
 const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
 
-// Names for google.protobuf.GoFeatures.
+// Names for pb.GoFeatures.
 const (
 	GoFeatures_message_name     protoreflect.Name     = "GoFeatures"
-	GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
+	GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
 )
 
-// Field names for google.protobuf.GoFeatures.
+// Field names for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
+	GoFeatures_ApiLevel_field_name                protoreflect.Name = "api_level"
+	GoFeatures_StripEnumPrefix_field_name         protoreflect.Name = "strip_enum_prefix"
 
-	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
+	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
+	GoFeatures_ApiLevel_field_fullname                protoreflect.FullName = "pb.GoFeatures.api_level"
+	GoFeatures_StripEnumPrefix_field_fullname         protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
 )
 
-// Field numbers for google.protobuf.GoFeatures.
+// Field numbers for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
+	GoFeatures_ApiLevel_field_number                protoreflect.FieldNumber = 2
+	GoFeatures_StripEnumPrefix_field_number         protoreflect.FieldNumber = 3
+)
+
+// Full and short names for pb.GoFeatures.APILevel.
+const (
+	GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel"
+	GoFeatures_APILevel_enum_name     = "APILevel"
+)
+
+// Enum values for pb.GoFeatures.APILevel.
+const (
+	GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0
+	GoFeatures_API_OPEN_enum_value              = 1
+	GoFeatures_API_HYBRID_enum_value            = 2
+	GoFeatures_API_OPAQUE_enum_value            = 3
+)
+
+// Full and short names for pb.GoFeatures.StripEnumPrefix.
+const (
+	GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
+	GoFeatures_StripEnumPrefix_enum_name     = "StripEnumPrefix"
+)
+
+// Enum values for pb.GoFeatures.StripEnumPrefix.
+const (
+	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value   = 0
+	GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value          = 1
+	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2
+	GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value         = 3
+)
+
+// Extension numbers
+const (
+	FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
 )
diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go
index 693d2e9e..99bb95ba 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/goname.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go
@@ -11,15 +11,10 @@ const (
 	SizeCache_goname  = "sizeCache"
 	SizeCacheA_goname = "XXX_sizecache"
 
-	WeakFields_goname  = "weakFields"
-	WeakFieldsA_goname = "XXX_weak"
-
 	UnknownFields_goname  = "unknownFields"
 	UnknownFieldsA_goname = "XXX_unrecognized"
 
 	ExtensionFields_goname  = "extensionFields"
 	ExtensionFieldsA_goname = "XXX_InternalExtensions"
 	ExtensionFieldsB_goname = "XXX_extensions"
-
-	WeakFieldPrefix_goname = "XXX_weak_"
 )
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index 8f9ea02f..bef5a25f 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
 
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // Generic field names and numbers for synthetic map entry messages.
 const (
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/google.golang.org/protobuf/internal/genid/name.go
similarity index 50%
rename from vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
rename to vendor/google.golang.org/protobuf/internal/genid/name.go
index 1a9efa12..224f3393 100644
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/name.go
@@ -2,13 +2,11 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.20
-// +build go1.20
+package genid
 
-package versions
+const (
+	NoUnkeyedLiteral_goname  = "noUnkeyedLiteral"
+	NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral"
 
-func init() {
-	if Compare(toolchain, Go1_20) < 0 {
-		toolchain = Go1_20
-	}
-}
+	BuilderSuffix_goname = "_builder"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 429384b8..9404270d 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
 
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // Generic field name and number for messages in wrappers.proto.
 const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
index a371f98d..5d5771c2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -22,13 +22,13 @@ type Export struct{}
 
 // NewError formats a string according to the format specifier and arguments and
 // returns an error that has a "proto" prefix.
-func (Export) NewError(f string, x ...interface{}) error {
+func (Export) NewError(f string, x ...any) error {
 	return errors.New(f, x...)
 }
 
 // enum is any enum type generated by protoc-gen-go
 // and must be a named int32 type.
-type enum = interface{}
+type enum = any
 
 // EnumOf returns the protoreflect.Enum interface over e.
 // It returns nil if e is nil.
@@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu
 
 // message is any message type generated by protoc-gen-go
 // and must be a pointer to a named struct type.
-type message = interface{}
+type message = any
 
 // legacyMessageWrapper wraps a v2 message as a v1 message.
 type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
new file mode 100644
index 00000000..6075d6f6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
@@ -0,0 +1,128 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"strconv"
+	"sync/atomic"
+	"unsafe"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func (Export) UnmarshalField(msg any, fieldNum int32) {
+	UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
+}
+
+// Present checks the presence set for a certain field number (zero
+// based, ordered by appearance in original proto file). part is
+// a pointer to the correct element in the bitmask array, num is the
+// field number unaltered.  Example (field number 70 -> part =
+// &m.XXX_presence[2], num = 70)
+func (Export) Present(part *uint32, num uint32) bool {
+	// This hook will read an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookPresent(part, num)
+	return atomic.LoadUint32(part)&(1<<(num%32)) > 0
+}
+
+// SetPresent adds a field to the presence set. part is a pointer to
+// the relevant element in the array and num is the field number
+// unaltered.  size is the number of fields in the protocol
+// buffer.
+func (Export) SetPresent(part *uint32, num uint32, size uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookSetPresent(part, num, presenceSize(size))
+	for {
+		old := atomic.LoadUint32(part)
+		if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
+			return
+		}
+	}
+}
+
+// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
+// It is meant for use by builder methods, where the message is known not
+// to be accessible yet by other goroutines.
+func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookSetPresent(part, num, presenceSize(size))
+	*part |= 1 << (num % 32)
+}
+
+// ClearPresent removes a field from the presence set. part is a
+// pointer to the relevant element in the presence array and num is
+// the field number unaltered.
+func (Export) ClearPresent(part *uint32, num uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookClearPresent(part, num)
+	for {
+		old := atomic.LoadUint32(part)
+		if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
+			return
+		}
+	}
+}
+
+// interfaceToPointer takes a pointer to an empty interface whose value is a
+// pointer type, and converts it into a "pointer" that points to the same
+// target
+func interfaceToPointer(i *any) pointer {
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+func (p pointer) atomicGetPointer() pointer {
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) atomicSetPointer(q pointer) {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
+}
+
+// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
+// pointer) and returns true if the pointed-to pointer is nil (using an
+// atomic load).  This function is inlineable and, on x86, just becomes a
+// simple load and compare.
+func (Export) AtomicCheckPointerIsNil(ptr any) bool {
+	return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
+}
+
+// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
+// second is a pointer) and atomically sets the second pointer into location
+// referenced by first pointer.  Unfortunately, atomicSetPointer() does not inline
+// (even on x86), so this does not become a simple store on x86.
+func (Export) AtomicSetPointer(dstPtr, valPtr any) {
+	interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
+}
+
+// AtomicLoadPointer loads the pointer at the location pointed at by src,
+// and stores that pointer value into the location pointed at by dst.
+func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
+	*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+}
+
+// AtomicInitializePointer makes ptr and dst point to the same value.
+//
+// If *ptr is a nil pointer, it sets *ptr = *dst.
+//
+// If *ptr is a non-nil pointer, it sets *dst = *ptr.
+func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
+	if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
+		*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+	}
+}
+
+// MessageFieldStringOf returns the field formatted as a string,
+// either as the field name if resolvable otherwise as a decimal string.
+func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
+	fd := md.Fields().ByNumber(n)
+	if fd != nil {
+		return string(fd.Name())
+	}
+	return strconv.Itoa(int(n))
+}
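
The Present/SetPresent helpers above manipulate one uint32 word of a per-message presence bitmask: a zero-based field index n lives in word n/32 at bit n%32, so index 70 lands in word 2. A minimal, standalone Go sketch of that arithmetic follows (not part of the patch; the XXX_presence field name and word count are illustrative assumptions based on the comment above):

package main

import (
	"fmt"
	"sync/atomic"
)

type msg struct {
	XXX_presence [3]uint32 // enough 32-bit words for field indices 0..95
}

// present mirrors Export.Present: test bit num%32 of the word part points at.
func present(part *uint32, num uint32) bool {
	return atomic.LoadUint32(part)&(1<<(num%32)) != 0
}

// setPresent mirrors Export.SetPresent's CAS loop: set bit num%32 atomically.
func setPresent(part *uint32, num uint32) {
	for {
		old := atomic.LoadUint32(part)
		if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
			return
		}
	}
}

func main() {
	var m msg
	const num = 70                  // zero-based field index from the comment above
	part := &m.XXX_presence[num/32] // word 2 holds bits for indices 64..95
	setPresent(part, num)
	fmt.Println(present(part, num))             // true
	fmt.Println(present(&m.XXX_presence[0], 3)) // false: never set
}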
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
new file mode 100644
index 00000000..ea276547
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+package impl
+
+// There is no additional data as we're not running under race detector.
+type RaceDetectHookData struct{}
+
+// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
+func (presence) raceDetectHookPresent(num uint32)                       {}
+func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
+func (presence) raceDetectHookClearPresent(num uint32)                  {}
+func (presence) raceDetectHookAllocAndCopy(src presence)                {}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
new file mode 100644
index 00000000..e9a27583
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
@@ -0,0 +1,126 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package impl
+
+// When running under the race detector, we add a presence map of bytes that we can access
+// in the hook functions so that we trigger the race detection whenever we have concurrent
+// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent
+// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations.
+type RaceDetectHookData struct {
+	shadowPresence *[]byte
+}
+
+// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
+// using non-atomic operations.
+func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
+	sp := make([]byte, size)
+	atomicStoreShadowPresence(&data.shadowPresence, &sp)
+}
+
+func (p presence) raceDetectHookPresent(num uint32) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		_ = (*sp)[num]
+	}
+}
+
+func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp == nil {
+		data.raceDetectHookAlloc(size)
+		sp = atomicLoadShadowPresence(&data.shadowPresence)
+	}
+	(*sp)[num] = 1
+}
+
+func (p presence) raceDetectHookClearPresent(num uint32) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		(*sp)[num] = 0
+
+	}
+}
+
+// raceDetectHookAllocAndCopy allocates a new shadowPresence slice for the destination
+// presence p and copies the shadowPresence bytes from the source presence q.
+func (p presence) raceDetectHookAllocAndCopy(q presence) {
+	sData := q.toRaceDetectData()
+	dData := p.toRaceDetectData()
+	if sData == nil {
+		return
+	}
+	srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
+	if srcSp == nil {
+		atomicStoreShadowPresence(&dData.shadowPresence, nil)
+		return
+	}
+	n := len(*srcSp)
+	dSlice := make([]byte, n)
+	atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
+	for i := 0; i < n; i++ {
+		dSlice[i] = (*srcSp)[i]
+	}
+}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		_ = (*sp)[num]
+	}
+}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp == nil {
+		data.raceDetectHookAlloc(size)
+		sp = atomicLoadShadowPresence(&data.shadowPresence)
+	}
+	(*sp)[num] = 1
+}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		(*sp)[num] = 0
+	}
+}
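
The shadow presence slice in this file exists only to give the race detector something to observe: the real bitmap and pointers are updated atomically, which -race cannot flag, so every hook also performs a plain, non-atomic write. A throwaway sketch of that idea (not cloudflared or protobuf code, assumed to be run with the race detector enabled, e.g. go run -race):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// setBit updates the real word atomically (never reported by -race) and then
// mirrors the write into an unsynchronized shadow slice, which -race will
// report if two goroutines call this concurrently.
func setBit(word *uint32, shadow []byte, num uint32) {
	for {
		old := atomic.LoadUint32(word)
		if atomic.CompareAndSwapUint32(word, old, old|(1<<(num%32))) {
			break
		}
	}
	shadow[num] = 1 // plain write: visible to the race detector
}

func main() {
	var word uint32
	shadow := make([]byte, 32)

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			setBit(&word, shadow, 5) // concurrent plain writes to shadow[5]
		}()
	}
	wg.Wait()
	fmt.Printf("word=%#x\n", atomic.LoadUint32(&word))
}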
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index bff041ed..fe2c719c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
 		}
 		return nil
 	}
+
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	if mi.extensionOffset.IsValid() {
 		e := p.Apply(mi.extensionOffset).Extensions()
 		if err := mi.isInitExtensions(e); err != nil {
@@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
 		if !f.isRequired && f.funcs.isInit == nil {
 			continue
 		}
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				if f.isRequired {
+					return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+				}
+				continue
+			}
+			if f.funcs.isInit != nil {
+				f.mi.init()
+				if f.mi.needsInitCheck {
+					if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
+						lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+						if !lazy.AllowedPartial() {
+							// Nothing to see here, it was checked on unmarshal
+							continue
+						}
+						mi.lazyUnmarshal(p, f.num)
+					}
+					if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
+						return err
+					}
+				}
+			}
+			continue
+		}
+
 		fptr := p.Apply(f.offset)
 		if f.isPointer && fptr.Elem().IsNil() {
 			if f.isRequired {
@@ -68,7 +101,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
 	}
 	for _, x := range *ext {
 		ei := getExtensionFieldInfo(x.Type())
-		if ei.funcs.isInit == nil {
+		if ei.funcs.isInit == nil || x.isUnexpandedLazy() {
 			continue
 		}
 		v := x.Value()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 2b8f122c..0d5b546e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,7 +67,6 @@ type lazyExtensionValue struct {
 	xi         *extensionFieldInfo
 	value      protoreflect.Value
 	b          []byte
-	fn         func() protoreflect.Value
 }
 
 type ExtensionField struct {
@@ -99,6 +98,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool {
 	return false
 }
 
+// isUnexpandedLazy returns true if the ExtensionField is lazy and not
+// yet expanded, which means it's present and already checked for
+// initialized required fields.
+func (f *ExtensionField) isUnexpandedLazy() bool {
+	return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
+}
+
+// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded.
+//
+// The returned buffer has to be kept over whatever operation we're planning,
+// as re-retrieving it will fail after the message is lazily decoded.
+func (f *ExtensionField) lazyBuffer() []byte {
+	// This function might be in the critical path, so check the atomic without
+	// taking a look first, then only take the lock if needed.
+	if !f.isUnexpandedLazy() {
+		return nil
+	}
+	f.lazy.mu.Lock()
+	defer f.lazy.mu.Unlock()
+	return f.lazy.b
+}
+
 func (f *ExtensionField) lazyInit() {
 	f.lazy.mu.Lock()
 	defer f.lazy.mu.Unlock()
@@ -136,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
 		}
 		f.lazy.value = val
 	} else {
-		f.lazy.value = f.lazy.fn()
+		panic("No support for lazy fns for ExtensionField")
 	}
 	f.lazy.xi = nil
-	f.lazy.fn = nil
 	f.lazy.b = nil
 	atomic.StoreUint32(&f.lazy.atomicOnce, 1)
 }
@@ -152,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
 	f.lazy = nil
 }
 
-// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
-// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
-	f.typ = t
-	f.lazy = &lazyExtensionValue{fn: fn}
-}
-
 // Value returns the value of the extension field.
 // This may be called concurrently.
 func (f *ExtensionField) Value() protoreflect.Value {
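
isUnexpandedLazy and lazyBuffer above implement the read side of a double-checked pattern: an atomic flag is consulted first so the common, already-expanded case never takes the mutex. A simplified sketch of that pattern, with a made-up lazyValue type standing in for lazyExtensionValue:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyValue decodes its raw bytes at most once; readers consult an atomic flag
// first (cheap, lock-free) and fall back to the mutex only while unexpanded.
type lazyValue struct {
	mu         sync.Mutex
	atomicOnce uint32 // 0 = raw bytes still held, 1 = decoded
	raw        []byte
	value      string
}

func (l *lazyValue) get() string {
	if atomic.LoadUint32(&l.atomicOnce) == 0 {
		l.mu.Lock()
		if l.atomicOnce == 0 { // re-check under the lock
			l.value = fmt.Sprintf("decoded %d bytes", len(l.raw))
			l.raw = nil
			atomic.StoreUint32(&l.atomicOnce, 1)
		}
		l.mu.Unlock()
	}
	return l.value
}

func main() {
	v := &lazyValue{raw: []byte{1, 2, 3}}
	fmt.Println(v.get()) // decoded 3 bytes
	fmt.Println(v.get()) // fast path: atomic load only, no lock
}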
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 78ee47e4..d14d7d93 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -5,15 +5,12 @@
 package impl
 
 import (
-	"fmt"
 	"reflect"
-	"sync"
 
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/errors"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
 	"google.golang.org/protobuf/runtime/protoiface"
 )
 
@@ -65,6 +62,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
 			if err != nil {
 				return out, err
 			}
+			if cf.funcs.isInit == nil {
+				out.initialized = true
+			}
 			vi.Set(vw)
 			return out, nil
 		}
@@ -118,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
 	}
 }
 
-func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs {
-	var once sync.Once
-	var messageType protoreflect.MessageType
-	lazyInit := func() {
-		once.Do(func() {
-			messageName := fd.Message().FullName()
-			messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
-		})
-	}
-
-	return pointerCoderFuncs{
-		size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
-			m, ok := p.WeakFields().get(f.num)
-			if !ok {
-				return 0
-			}
-			lazyInit()
-			if messageType == nil {
-				panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
-			}
-			return sizeMessage(m, f.tagsize, opts)
-		},
-		marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-			m, ok := p.WeakFields().get(f.num)
-			if !ok {
-				return b, nil
-			}
-			lazyInit()
-			if messageType == nil {
-				panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
-			}
-			return appendMessage(b, m, f.wiretag, opts)
-		},
-		unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
-			fs := p.WeakFields()
-			m, ok := fs.get(f.num)
-			if !ok {
-				lazyInit()
-				if messageType == nil {
-					return unmarshalOutput{}, errUnknown
-				}
-				m = messageType.New().Interface()
-				fs.set(f.num, m)
-			}
-			return consumeMessage(b, m, wtyp, opts)
-		},
-		isInit: func(p pointer, f *coderFieldInfo) error {
-			m, ok := p.WeakFields().get(f.num)
-			if !ok {
-				return nil
-			}
-			return proto.CheckInitialized(m)
-		},
-		merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
-			sm, ok := src.WeakFields().get(f.num)
-			if !ok {
-				return
-			}
-			dm, ok := dst.WeakFields().get(f.num)
-			if !ok {
-				lazyInit()
-				if messageType == nil {
-					panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
-				}
-				dm = messageType.New().Interface()
-				dst.WeakFields().set(f.num, dm)
-			}
-			opts.Merge(dm, sm)
-		},
-	}
-}
-
 func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
 	if mi := getMessageInfo(ft); mi != nil {
 		funcs := pointerCoderFuncs{
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
new file mode 100644
index 00000000..76818ea2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
@@ -0,0 +1,264 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"reflect"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+	mi := getMessageInfo(ft)
+	if mi == nil {
+		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
+	}
+	switch fd.Kind() {
+	case protoreflect.MessageKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueMessage,
+			marshal:   appendOpaqueMessage,
+			unmarshal: consumeOpaqueMessage,
+			isInit:    isInitOpaqueMessage,
+			merge:     mergeOpaqueMessage,
+		}
+	case protoreflect.GroupKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueGroup,
+			marshal:   appendOpaqueGroup,
+			unmarshal: consumeOpaqueGroup,
+			isInit:    isInitOpaqueMessage,
+			merge:     mergeOpaqueMessage,
+		}
+	}
+	panic("unexpected field kind")
+}
+
+func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
+}
+
+func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	mp := p.AtomicGetPointer()
+	calculatedSize := f.mi.sizePointer(mp, opts)
+	b = protowire.AppendVarint(b, f.wiretag)
+	b = protowire.AppendVarint(b, uint64(calculatedSize))
+	before := len(b)
+	b, err := f.mi.marshalAppendPointer(b, mp, opts)
+	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+	}
+	return b, err
+}
+
+func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.BytesType {
+		return out, errUnknown
+	}
+	v, n := protowire.ConsumeBytes(b)
+	if n < 0 {
+		return out, errDecode
+	}
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+	if err != nil {
+		return out, err
+	}
+	out.n = n
+	out.initialized = o.initialized
+	return out, nil
+}
+
+func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		return nil
+	}
+	return f.mi.checkInitializedPointer(mp)
+}
+
+func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+	dstmp := dst.AtomicGetPointer()
+	if dstmp.IsNil() {
+		dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
+}
+
+func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
+}
+
+func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	b = protowire.AppendVarint(b, f.wiretag) // start group
+	b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
+	b = protowire.AppendVarint(b, f.wiretag+1) // end group
+	return b, err
+}
+
+func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.StartGroupType {
+		return out, errUnknown
+	}
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
+	return o, e
+}
+
+func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
+	}
+	mt := ft.Elem().Elem() // *[]*T -> *T
+	mi := getMessageInfo(mt)
+	if mi == nil {
+		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
+	}
+	switch fd.Kind() {
+	case protoreflect.MessageKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueMessageSlice,
+			marshal:   appendOpaqueMessageSlice,
+			unmarshal: consumeOpaqueMessageSlice,
+			isInit:    isInitOpaqueMessageSlice,
+			merge:     mergeOpaqueMessageSlice,
+		}
+	case protoreflect.GroupKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueGroupSlice,
+			marshal:   appendOpaqueGroupSlice,
+			unmarshal: consumeOpaqueGroupSlice,
+			isInit:    isInitOpaqueMessageSlice,
+			merge:     mergeOpaqueMessageSlice,
+		}
+	}
+	panic("unexpected field kind")
+}
+
+func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	s := p.AtomicGetPointer().PointerSlice()
+	n := 0
+	for _, v := range s {
+		n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
+	}
+	return n
+}
+
+func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	s := p.AtomicGetPointer().PointerSlice()
+	var err error
+	for _, v := range s {
+		b = protowire.AppendVarint(b, f.wiretag)
+		siz := f.mi.sizePointer(v, opts)
+		b = protowire.AppendVarint(b, uint64(siz))
+		before := len(b)
+		b, err = f.mi.marshalAppendPointer(b, v, opts)
+		if err != nil {
+			return b, err
+		}
+		if measuredSize := len(b) - before; siz != measuredSize {
+			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+		}
+	}
+	return b, nil
+}
+
+func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.BytesType {
+		return out, errUnknown
+	}
+	v, n := protowire.ConsumeBytes(b)
+	if n < 0 {
+		return out, errDecode
+	}
+	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+	if err != nil {
+		return out, err
+	}
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	sp.AppendPointerSlice(mp)
+	out.n = n
+	out.initialized = o.initialized
+	return out, nil
+}
+
+func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		return nil
+	}
+	s := sp.PointerSlice()
+	for _, v := range s {
+		if err := f.mi.checkInitializedPointer(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+	ds := dst.AtomicGetPointer()
+	if ds.IsNil() {
+		ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	for _, sp := range src.AtomicGetPointer().PointerSlice() {
+		dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+		f.mi.mergePointer(dm, sp, opts)
+		ds.AppendPointerSlice(dm)
+	}
+}
+
+func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	s := p.AtomicGetPointer().PointerSlice()
+	n := 0
+	for _, v := range s {
+		n += 2*f.tagsize + f.mi.sizePointer(v, opts)
+	}
+	return n
+}
+
+func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	s := p.AtomicGetPointer().PointerSlice()
+	var err error
+	for _, v := range s {
+		b = protowire.AppendVarint(b, f.wiretag) // start group
+		b, err = f.mi.marshalAppendPointer(b, v, opts)
+		if err != nil {
+			return b, err
+		}
+		b = protowire.AppendVarint(b, f.wiretag+1) // end group
+	}
+	return b, nil
+}
+
+func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.StartGroupType {
+		return out, errUnknown
+	}
+	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+	out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
+	if err != nil {
+		return out, err
+	}
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	sp.AppendPointerSlice(mp)
+	return out, err
+}
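
Both appendOpaqueMessage and appendOpaqueMessageSlice above guard against a mismatch between the precomputed size used for the length prefix and the bytes actually written. A self-contained sketch of that guard using protowire; the helper name and error text here are invented for illustration:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// appendLengthPrefixed writes tag + length + payload and verifies afterwards that
// the measured payload size matches the size the length prefix was computed from.
func appendLengthPrefixed(b []byte, tag uint64, payload []byte) ([]byte, error) {
	calculated := len(payload)
	b = protowire.AppendVarint(b, tag)
	b = protowire.AppendVarint(b, uint64(calculated))
	before := len(b)
	b = append(b, payload...)
	if measured := len(b) - before; measured != calculated {
		return nil, fmt.Errorf("size mismatch: calculated %d, measured %d", calculated, measured)
	}
	return b, nil
}

func main() {
	tag := protowire.EncodeTag(1, protowire.BytesType)
	b, err := appendLengthPrefixed(nil, tag, []byte("hello"))
	fmt.Println(len(b), err) // 7 <nil>: one tag byte, one length byte, five payload bytes
}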
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index fb35f0ba..229c6980 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
 		return 0
 	}
 	n := 0
-	iter := mapRange(mapv)
+	iter := mapv.MapRange()
 	for iter.Next() {
 		key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
 		keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
@@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o
 	if opts.Deterministic() {
 		return appendMapDeterministic(b, mapv, mapi, f, opts)
 	}
-	iter := mapRange(mapv)
+	iter := mapv.MapRange()
 	for iter.Next() {
 		var err error
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
 		if !mi.needsInitCheck {
 			return nil
 		}
-		iter := mapRange(mapv)
+		iter := mapv.MapRange()
 		for iter.Next() {
 			val := pointerOfValue(iter.Value())
 			if err := mi.checkInitializedPointer(val); err != nil {
@@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
 			}
 		}
 	} else {
-		iter := mapRange(mapv)
+		iter := mapv.MapRange()
 		for iter.Next() {
 			val := mapi.conv.valConv.PBValueOf(iter.Value())
 			if err := mapi.valFuncs.isInit(val); err != nil {
@@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
 	if dstm.IsNil() {
 		dstm.Set(reflect.MakeMap(f.ft))
 	}
-	iter := mapRange(srcm)
+	iter := srcm.MapRange()
 	for iter.Next() {
 		dstm.SetMapIndex(iter.Key(), iter.Value())
 	}
@@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
 	if dstm.IsNil() {
 		dstm.Set(reflect.MakeMap(f.ft))
 	}
-	iter := mapRange(srcm)
+	iter := srcm.MapRange()
 	for iter.Next() {
 		dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
 	}
@@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
 	if dstm.IsNil() {
 		dstm.Set(reflect.MakeMap(f.ft))
 	}
-	iter := mapRange(srcm)
+	iter := srcm.MapRange()
 	for iter.Next() {
 		val := reflect.New(f.ft.Elem().Elem())
 		if f.mi != nil {
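
The changes in this file simply drop the pre-Go-1.12 mapRange shim in favour of calling reflect.Value.MapRange directly, which every toolchain this module now supports provides. A minimal usage sketch (iteration order over a Go map is unspecified):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	iter := reflect.ValueOf(m).MapRange() // available since Go 1.12
	for iter.Next() {
		fmt.Println(iter.Key().String(), iter.Value().Int())
	}
}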
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
deleted file mode 100644
index 4b15493f..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.12
-// +build !go1.12
-
-package impl
-
-import "reflect"
-
-type mapIter struct {
-	v    reflect.Value
-	keys []reflect.Value
-}
-
-// mapRange provides a less-efficient equivalent to
-// the Go 1.12 reflect.Value.MapRange method.
-func mapRange(v reflect.Value) *mapIter {
-	return &mapIter{v: v}
-}
-
-func (i *mapIter) Next() bool {
-	if i.keys == nil {
-		i.keys = i.v.MapKeys()
-	} else {
-		i.keys = i.keys[1:]
-	}
-	return len(i.keys) > 0
-}
-
-func (i *mapIter) Key() reflect.Value {
-	return i.keys[0]
-}
-
-func (i *mapIter) Value() reflect.Value {
-	return i.v.MapIndex(i.keys[0])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
deleted file mode 100644
index 0b31b66e..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.12
-// +build go1.12
-
-package impl
-
-import "reflect"
-
-func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 6b2fdbb7..f78b57b0 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -32,6 +32,10 @@ type coderMessageInfo struct {
 	needsInitCheck     bool
 	isMessageSet       bool
 	numRequiredFields  uint8
+
+	lazyOffset     offset
+	presenceOffset offset
+	presenceSize   presenceSize
 }
 
 type coderFieldInfo struct {
@@ -45,12 +49,19 @@ type coderFieldInfo struct {
 	tagsize    int                      // size of the varint-encoded tag
 	isPointer  bool                     // true if IsNil may be called on the struct field
 	isRequired bool                     // true if field is required
+
+	isLazy        bool
+	presenceIndex uint32
 }
 
+const noPresence = 0xffffffff
+
 func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 	mi.sizecacheOffset = invalidOffset
 	mi.unknownOffset = invalidOffset
 	mi.extensionOffset = invalidOffset
+	mi.lazyOffset = invalidOffset
+	mi.presenceOffset = si.presenceOffset
 
 	if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
 		mi.sizecacheOffset = si.sizecacheOffset
@@ -107,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 				},
 			}
 		case isOneof:
-			fieldOffset = offsetOf(fs, mi.Exporter)
-		case fd.IsWeak():
-			fieldOffset = si.weakOffset
-			funcs = makeWeakMessageFieldCoder(fd)
+			fieldOffset = offsetOf(fs)
 		default:
-			fieldOffset = offsetOf(fs, mi.Exporter)
+			fieldOffset = offsetOf(fs)
 			childMessage, funcs = fieldCoder(fd, ft)
 		}
 		cf := &preallocFields[i]
@@ -127,6 +135,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 			validation: newFieldValidationInfo(mi, si, fd, ft),
 			isPointer:  fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
 			isRequired: fd.Cardinality() == protoreflect.Required,
+
+			presenceIndex: noPresence,
 		}
 		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
 		mi.coderFields[cf.num] = cf
@@ -189,6 +199,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 	if mi.methods.Merge == nil {
 		mi.methods.Merge = mi.merge
 	}
+	if mi.methods.Equal == nil {
+		mi.methods.Equal = equal
+	}
 }
 
 // getUnknownBytes returns a *[]byte for the unknown fields.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
new file mode 100644
index 00000000..41c1f74e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -0,0 +1,153 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/order"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
+	mi.sizecacheOffset = si.sizecacheOffset
+	mi.unknownOffset = si.unknownOffset
+	mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
+	mi.extensionOffset = si.extensionOffset
+	mi.lazyOffset = si.lazyOffset
+	mi.presenceOffset = si.presenceOffset
+
+	mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
+	fields := mi.Desc.Fields()
+	for i := 0; i < fields.Len(); i++ {
+		fd := fields.Get(i)
+
+		fs := si.fieldsByNumber[fd.Number()]
+		if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
+			fs = si.oneofsByName[fd.ContainingOneof().Name()]
+		}
+		ft := fs.Type
+		var wiretag uint64
+		if !fd.IsPacked() {
+			wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
+		} else {
+			wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
+		}
+		var fieldOffset offset
+		var funcs pointerCoderFuncs
+		var childMessage *MessageInfo
+		switch {
+		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+			fieldOffset = offsetOf(fs)
+		case fd.Message() != nil && !fd.IsMap():
+			fieldOffset = offsetOf(fs)
+			if fd.IsList() {
+				childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
+			} else {
+				childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
+			}
+		default:
+			fieldOffset = offsetOf(fs)
+			childMessage, funcs = fieldCoder(fd, ft)
+		}
+		cf := &coderFieldInfo{
+			num:        fd.Number(),
+			offset:     fieldOffset,
+			wiretag:    wiretag,
+			ft:         ft,
+			tagsize:    protowire.SizeVarint(wiretag),
+			funcs:      funcs,
+			mi:         childMessage,
+			validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
+			isPointer: (fd.Cardinality() == protoreflect.Repeated ||
+				fd.Kind() == protoreflect.MessageKind ||
+				fd.Kind() == protoreflect.GroupKind),
+			isRequired:    fd.Cardinality() == protoreflect.Required,
+			presenceIndex: noPresence,
+		}
+
+		// TODO: Use presence for all fields.
+		//
+		// In some cases, such as maps, presence means only "might be set" rather
+		// than "is definitely set", but every field should have a presence bit to
+		// permit us to skip over definitely-unset fields at marshal time.
+
+		var hasPresence bool
+		hasPresence, cf.isLazy = usePresenceForField(si, fd)
+
+		if hasPresence {
+			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
+		}
+
+		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
+		mi.coderFields[cf.num] = cf
+	}
+	for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
+		if od := oneofs.Get(i); !od.IsSynthetic() {
+			mi.initOneofFieldCoders(od, si.structInfo)
+		}
+	}
+	if messageset.IsMessageSet(mi.Desc) {
+		if !mi.extensionOffset.IsValid() {
+			panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
+		}
+		if !mi.unknownOffset.IsValid() {
+			panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
+		}
+		mi.isMessageSet = true
+	}
+	sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+		return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
+	})
+
+	var maxDense protoreflect.FieldNumber
+	for _, cf := range mi.orderedCoderFields {
+		if cf.num >= 16 && cf.num >= 2*maxDense {
+			break
+		}
+		maxDense = cf.num
+	}
+	mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
+	for _, cf := range mi.orderedCoderFields {
+		if int(cf.num) > len(mi.denseCoderFields) {
+			break
+		}
+		mi.denseCoderFields[cf.num] = cf
+	}
+
+	// To preserve compatibility with historic wire output, marshal oneofs last.
+	if mi.Desc.Oneofs().Len() > 0 {
+		sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+			fi := fields.ByNumber(mi.orderedCoderFields[i].num)
+			fj := fields.ByNumber(mi.orderedCoderFields[j].num)
+			return order.LegacyFieldOrder(fi, fj)
+		})
+	}
+
+	mi.needsInitCheck = needsInitCheck(mi.Desc)
+	if mi.methods.Marshal == nil && mi.methods.Size == nil {
+		mi.methods.Flags |= piface.SupportMarshalDeterministic
+		mi.methods.Marshal = mi.marshal
+		mi.methods.Size = mi.size
+	}
+	if mi.methods.Unmarshal == nil {
+		mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+		mi.methods.Unmarshal = mi.unmarshal
+	}
+	if mi.methods.CheckInitialized == nil {
+		mi.methods.CheckInitialized = mi.checkInitialized
+	}
+	if mi.methods.Merge == nil {
+		mi.methods.Merge = mi.merge
+	}
+	if mi.methods.Equal == nil {
+		mi.methods.Equal = equal
+	}
+}
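
The dense-field table built above stops growing once field numbers reach 16 and become sparse relative to the densest prefix, so a message with one huge field number does not allocate a huge lookup slice. A small sketch of that cutoff rule, using plain ints instead of protoreflect.FieldNumber:

package main

import "fmt"

// denseCutoff mimics the loop above: keep extending the dense table while field
// numbers stay small (< 16) or stay within 2x of the densest prefix seen so far.
func denseCutoff(sortedFieldNumbers []int) int {
	maxDense := 0
	for _, num := range sortedFieldNumbers {
		if num >= 16 && num >= 2*maxDense {
			break
		}
		maxDense = num
	}
	return maxDense
}

func main() {
	fmt.Println(denseCutoff([]int{1, 2, 3, 500})) // 3: 500 is too sparse for the table
	fmt.Println(denseCutoff([]int{12, 14, 20}))   // 20: still within 2x of 14
}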
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
index b7a23faf..7a16ec13 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int)
 		}
 		num, _ := protowire.DecodeTag(xi.wiretag)
 		size += messageset.SizeField(num)
+		if fullyLazyExtensions(opts) {
+			// Don't expand the extension, instead use the buffer to calculate size
+			if lb := x.lazyBuffer(); lb != nil {
+				// We got hold of the buffer, so it's still lazy.
+				// Don't count the tag size in the extension buffer, it's already added.
+				size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize
+				continue
+			}
+		}
 		size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
 	}
 
@@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma
 	xi := getExtensionFieldInfo(x.Type())
 	num, _ := protowire.DecodeTag(xi.wiretag)
 	b = messageset.AppendFieldStart(b, num)
+
+	if fullyLazyExtensions(opts) {
+		// Don't expand the extension if it's still in wire format, instead use the buffer content.
+		if lb := x.lazyBuffer(); lb != nil {
+			// The tag inside the lazy buffer is a different tag (the extension
+			// number), but what we need here is the tag for FieldMessage:
+			b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType))
+			b = append(b, lb[xi.tagsize:]...)
+			b = messageset.AppendFieldEnd(b)
+			return b, nil
+		}
+	}
+
 	b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
 	if err != nil {
 		return b, err
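
The lazy-buffer fast path above re-emits an unexpanded extension inside a MessageSet item by swapping only the leading tag and copying the remaining length-delimited bytes untouched. A sketch of that re-tagging step with protowire; field number 1000 is a hypothetical extension number and 3 stands in for the MessageSet "message" field (messageset.FieldMessage):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// retagLazyBuffer emits a retained wire buffer under the MessageSet "message"
// field tag (3), skipping the original extension tag of origTagSize bytes and
// copying the length-delimited payload verbatim, without ever decoding it.
func retagLazyBuffer(lazy []byte, origTagSize int) []byte {
	b := protowire.AppendVarint(nil, protowire.EncodeTag(3, protowire.BytesType))
	return append(b, lazy[origTagSize:]...)
}

func main() {
	// Hypothetical lazy buffer for extension field 1000: tag, then length-prefixed payload.
	lazy := protowire.AppendTag(nil, 1000, protowire.BytesType)
	tagSize := len(lazy)
	lazy = protowire.AppendBytes(lazy, []byte("payload"))

	fmt.Printf("% x\n", retagLazyBuffer(lazy, tagSize))
}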
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
deleted file mode 100644
index 145c577b..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
-	"reflect"
-
-	"google.golang.org/protobuf/encoding/protowire"
-)
-
-func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
-	v := p.v.Elem().Int()
-	return f.tagsize + protowire.SizeVarint(uint64(v))
-}
-
-func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	v := p.v.Elem().Int()
-	b = protowire.AppendVarint(b, f.wiretag)
-	b = protowire.AppendVarint(b, uint64(v))
-	return b, nil
-}
-
-func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	v, n := protowire.ConsumeVarint(b)
-	if n < 0 {
-		return out, errDecode
-	}
-	p.v.Elem().SetInt(int64(v))
-	out.n = n
-	return out, nil
-}
-
-func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	dst.v.Elem().Set(src.v.Elem())
-}
-
-var coderEnum = pointerCoderFuncs{
-	size:      sizeEnum,
-	marshal:   appendEnum,
-	unmarshal: consumeEnum,
-	merge:     mergeEnum,
-}
-
-func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	if p.v.Elem().Int() == 0 {
-		return 0
-	}
-	return sizeEnum(p, f, opts)
-}
-
-func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	if p.v.Elem().Int() == 0 {
-		return b, nil
-	}
-	return appendEnum(b, p, f, opts)
-}
-
-func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	if src.v.Elem().Int() != 0 {
-		dst.v.Elem().Set(src.v.Elem())
-	}
-}
-
-var coderEnumNoZero = pointerCoderFuncs{
-	size:      sizeEnumNoZero,
-	marshal:   appendEnumNoZero,
-	unmarshal: consumeEnum,
-	merge:     mergeEnumNoZero,
-}
-
-func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	return sizeEnum(pointer{p.v.Elem()}, f, opts)
-}
-
-func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	return appendEnum(b, pointer{p.v.Elem()}, f, opts)
-}
-
-func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	if p.v.Elem().IsNil() {
-		p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
-	}
-	return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
-}
-
-func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	if !src.v.Elem().IsNil() {
-		v := reflect.New(dst.v.Type().Elem().Elem())
-		v.Elem().Set(src.v.Elem().Elem())
-		dst.v.Elem().Set(v)
-	}
-}
-
-var coderEnumPtr = pointerCoderFuncs{
-	size:      sizeEnumPtr,
-	marshal:   appendEnumPtr,
-	unmarshal: consumeEnumPtr,
-	merge:     mergeEnumPtr,
-}
-
-func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	s := p.v.Elem()
-	for i, llen := 0, s.Len(); i < llen; i++ {
-		size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
-	}
-	return size
-}
-
-func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	s := p.v.Elem()
-	for i, llen := 0, s.Len(); i < llen; i++ {
-		b = protowire.AppendVarint(b, f.wiretag)
-		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
-	}
-	return b, nil
-}
-
-func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	s := p.v.Elem()
-	if wtyp == protowire.BytesType {
-		b, n := protowire.ConsumeBytes(b)
-		if n < 0 {
-			return out, errDecode
-		}
-		for len(b) > 0 {
-			v, n := protowire.ConsumeVarint(b)
-			if n < 0 {
-				return out, errDecode
-			}
-			rv := reflect.New(s.Type().Elem()).Elem()
-			rv.SetInt(int64(v))
-			s.Set(reflect.Append(s, rv))
-			b = b[n:]
-		}
-		out.n = n
-		return out, nil
-	}
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	v, n := protowire.ConsumeVarint(b)
-	if n < 0 {
-		return out, errDecode
-	}
-	rv := reflect.New(s.Type().Elem()).Elem()
-	rv.SetInt(int64(v))
-	s.Set(reflect.Append(s, rv))
-	out.n = n
-	return out, nil
-}
-
-func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
-}
-
-var coderEnumSlice = pointerCoderFuncs{
-	size:      sizeEnumSlice,
-	marshal:   appendEnumSlice,
-	unmarshal: consumeEnumSlice,
-	merge:     mergeEnumSlice,
-}
-
-func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	s := p.v.Elem()
-	llen := s.Len()
-	if llen == 0 {
-		return 0
-	}
-	n := 0
-	for i := 0; i < llen; i++ {
-		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
-	}
-	return f.tagsize + protowire.SizeBytes(n)
-}
-
-func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	s := p.v.Elem()
-	llen := s.Len()
-	if llen == 0 {
-		return b, nil
-	}
-	b = protowire.AppendVarint(b, f.wiretag)
-	n := 0
-	for i := 0; i < llen; i++ {
-		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
-	}
-	b = protowire.AppendVarint(b, uint64(n))
-	for i := 0; i < llen; i++ {
-		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
-	}
-	return b, nil
-}
-
-var coderEnumPackedSlice = pointerCoderFuncs{
-	size:      sizeEnumPackedSlice,
-	marshal:   appendEnumPackedSlice,
-	unmarshal: consumeEnumSlice,
-	merge:     mergeEnumSlice,
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 757642e2..077712c2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,9 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
 package impl
 
 // When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 185ef2ef..f72ddd88 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -14,7 +14,7 @@ import (
 // unwrapper unwraps the value to the underlying value.
 // This is implemented by List and Map.
 type unwrapper interface {
-	protoUnwrap() interface{}
+	protoUnwrap() any
 }
 
 // A Converter converts to/from Go reflect.Value types and protobuf protoreflect.Value types.
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
 	return protoreflect.ValueOfString(v.Convert(stringType).String())
 }
 func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
-	// pref.Value.String never panics, so we go through an interface
+	// protoreflect.Value.String never panics, so we go through an interface
 	// conversion here to check the type.
 	s := v.Interface().(string)
 	if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
index f8913651..18cb96fd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value {
 func (ls *listReflect) IsValid() bool {
 	return !ls.v.IsNil()
 }
-func (ls *listReflect) protoUnwrap() interface{} {
+func (ls *listReflect) protoUnwrap() any {
 	return ls.v.Interface()
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index f30b0a05..e4580b3a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value {
 	return v
 }
 func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
-	iter := mapRange(ms.v)
+	iter := ms.v.MapRange()
 	for iter.Next() {
 		k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
 		v := ms.valConv.PBValueOf(iter.Value())
@@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value {
 func (ms *mapReflect) IsValid() bool {
 	return !ms.v.IsNil()
 }
-func (ms *mapReflect) protoUnwrap() interface{} {
+func (ms *mapReflect) protoUnwrap() any {
 	return ms.v.Interface()
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index cda0520c..e0dd21fa 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
 		AllowPartial:   true,
 		DiscardUnknown: o.DiscardUnknown(),
 		Resolver:       o.resolver,
+
+		NoLazyDecoding: o.NoLazyDecoding(),
 	}
 }
 
@@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool {
 	return o.flags&protoiface.UnmarshalDiscardUnknown != 0
 }
 
-func (o unmarshalOptions) IsDefault() bool {
-	return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
+func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
+func (o unmarshalOptions) Validated() bool   { return o.flags&protoiface.UnmarshalValidated != 0 }
+func (o unmarshalOptions) NoLazyDecoding() bool {
+	return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
+}
+
+func (o unmarshalOptions) CanBeLazy() bool {
+	if o.resolver != protoregistry.GlobalTypes {
+		return false
+	}
+	// We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set
+	return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
 }
 
 var lazyUnmarshalOptions = unmarshalOptions{
 	resolver: protoregistry.GlobalTypes,
-	depth:    protowire.DefaultRecursionLimit,
+
+	flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
+
+	depth: protowire.DefaultRecursionLimit,
 }
 
 type unmarshalOutput struct {
@@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 	if flags.ProtoLegacy && mi.isMessageSet {
 		return unmarshalMessageSet(mi, b, p, opts)
 	}
+
+	lazyDecoding := LazyEnabled() // default
+	if opts.NoLazyDecoding() {
+		lazyDecoding = false // explicitly disabled
+	}
+	if mi.lazyOffset.IsValid() && lazyDecoding {
+		return mi.unmarshalPointerLazy(b, p, groupTag, opts)
+	}
+	return mi.unmarshalPointerEager(b, p, groupTag, opts)
+}
+
+// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy.
+// The corresponding function for Lazy is in lazy.go.
+func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+
 	initialized := true
 	var requiredMask uint64
 	var exts *map[int32]ExtensionField
+
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	start := len(b)
 	for len(b) > 0 {
 		// Parse the tag (field number and wire type).
@@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 			if f.funcs.isInit != nil && !o.initialized {
 				initialized = false
 			}
+
+			if f.presenceIndex != noPresence {
+				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			}
+
 		default:
 			// Possible extension.
 			if exts == nil && mi.extensionOffset.IsValid() {
@@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
 		return out, errUnknown
 	}
 	if flags.LazyUnmarshalExtensions {
-		if opts.IsDefault() && x.canLazy(xt) {
+		if opts.CanBeLazy() && x.canLazy(xt) {
 			out, valid := skipExtension(b, xi, num, wtyp, opts)
 			switch valid {
 			case ValidationValid:
@@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp
 		if n < 0 {
 			return out, ValidationUnknown
 		}
+
+		if opts.Validated() {
+			out.initialized = true
+			out.n = n
+			return out, ValidationValid
+		}
+
 		out, st := xi.validation.mi.validate(v, 0, opts)
 		out.n = n
 		return out, st
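
The new CanBeLazy helper decides lazy eligibility purely by masking the unmarshal flags: any flag outside AliasBuffer, Validated, and CheckRequired disqualifies the input. Below is a minimal standalone sketch of that mask check; the flag constants are placeholders chosen for illustration, not the real protoiface values.

package main

import "fmt"

// Placeholder flag bits for illustration only; the real constants live in
// google.golang.org/protobuf/runtime/protoiface and their values are not
// assumed here.
const (
	flagAliasBuffer    uint8 = 1 << 0
	flagValidated      uint8 = 1 << 1
	flagCheckRequired  uint8 = 1 << 2
	flagDiscardUnknown uint8 = 1 << 3
)

// canBeLazy mirrors the shape of unmarshalOptions.CanBeLazy: lazy decoding is
// allowed only when no flag outside the permitted set is present.
func canBeLazy(flags uint8) bool {
	return (flags & ^(flagAliasBuffer | flagValidated | flagCheckRequired)) == 0
}

func main() {
	fmt.Println(canBeLazy(flagAliasBuffer | flagValidated)) // true
	fmt.Println(canBeLazy(flagDiscardUnknown))              // false
}
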
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index 845c67d6..b2e21229 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,7 +10,8 @@ import (
 	"sync/atomic"
 
 	"google.golang.org/protobuf/internal/flags"
-	proto "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/internal/protolazy"
+	"google.golang.org/protobuf/proto"
 	piface "google.golang.org/protobuf/runtime/protoiface"
 )
 
@@ -49,8 +50,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
 		return 0
 	}
 	if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
-		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
-			return int(size)
+		// The size cache contains the size + 1, to allow the
+		// zero value to be invalid, while also allowing for a
+		// 0 size to be cached.
+		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 {
+			return int(size - 1)
 		}
 	}
 	return mi.sizePointerSlow(p, opts)
@@ -60,7 +64,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 	if flags.ProtoLegacy && mi.isMessageSet {
 		size = sizeMessageSet(mi, p, opts)
 		if mi.sizecacheOffset.IsValid() {
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
 		}
 		return size
 	}
@@ -68,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 		e := p.Apply(mi.extensionOffset).Extensions()
 		size += mi.sizeExtensions(e, opts)
 	}
+
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+		if mi.lazyOffset.IsValid() {
+			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+		}
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.size == nil {
 			continue
 		}
 		fptr := p.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				continue
+			}
+
+			if f.isLazy && fptr.AtomicGetPointer().IsNil() {
+				if lazyFields(opts) {
+					size += (*lazy).SizeField(uint32(f.num))
+					continue
+				} else {
+					mi.lazyUnmarshal(p, f.num)
+				}
+			}
+			size += f.funcs.size(fptr, f, opts)
+			continue
+		}
+
 		if f.isPointer && fptr.Elem().IsNil() {
 			continue
 		}
@@ -84,13 +116,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 		}
 	}
 	if mi.sizecacheOffset.IsValid() {
-		if size > math.MaxInt32 {
+		if size > (math.MaxInt32 - 1) {
 			// The size is too large for the int32 sizecache field.
 			// We will need to recompute the size when encoding;
 			// unfortunately expensive, but better than invalid output.
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0)
 		} else {
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+			// The size cache contains the size + 1, to allow the
+			// zero value to be invalid, while also allowing for a
+			// 0 size to be cached.
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
 		}
 	}
 	return size
@@ -128,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
 			return b, err
 		}
 	}
+
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+		if mi.lazyOffset.IsValid() {
+			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+		}
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.marshal == nil {
 			continue
 		}
 		fptr := p.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				continue
+			}
+			if f.isLazy {
+				// Be careful, this field needs to be read atomically, like for a get
+				if f.isPointer && fptr.AtomicGetPointer().IsNil() {
+					if lazyFields(opts) {
+						b, _ = (*lazy).AppendField(b, uint32(f.num))
+						continue
+					} else {
+						mi.lazyUnmarshal(p, f.num)
+					}
+				}
+
+				b, err = f.funcs.marshal(b, fptr, f, opts)
+				if err != nil {
+					return b, err
+				}
+				continue
+			} else if f.isPointer && fptr.Elem().IsNil() {
+				continue
+			}
+			b, err = f.funcs.marshal(b, fptr, f, opts)
+			if err != nil {
+				return b, err
+			}
+			continue
+		}
+
 		if f.isPointer && fptr.Elem().IsNil() {
 			continue
 		}
@@ -149,6 +225,22 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
 	return b, nil
 }
 
+// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal.
+func fullyLazyExtensions(opts marshalOptions) bool {
+	// When deterministic marshaling is requested, force an unmarshal for lazy
+	// extensions to produce a deterministic result, instead of passing through
+	// bytes lazily that may or may not match what Go Protobuf would produce.
+	return opts.flags&piface.MarshalDeterministic == 0
+}
+
+// lazyFields returns true if we should attempt to keep fields lazy over size and marshal.
+func lazyFields(opts marshalOptions) bool {
+	// When deterministic marshaling is requested, force an unmarshal for lazy
+	// fields to produce a deterministic result, instead of passing through
+	// bytes lazily that may or may not match what Go Protobuf would produce.
+	return opts.flags&piface.MarshalDeterministic == 0
+}
+
 func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
 	if ext == nil {
 		return 0
@@ -158,6 +250,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha
 		if xi.funcs.size == nil {
 			continue
 		}
+		if fullyLazyExtensions(opts) {
+			// Don't expand the extension, instead use the buffer to calculate size
+			if lb := x.lazyBuffer(); lb != nil {
+				// We got hold of the buffer, so it's still lazy.
+				n += len(lb)
+				continue
+			}
+		}
 		n += xi.funcs.size(x.Value(), xi.tagsize, opts)
 	}
 	return n
@@ -176,6 +276,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
 		var err error
 		for _, x := range *ext {
 			xi := getExtensionFieldInfo(x.Type())
+			if fullyLazyExtensions(opts) {
+				// Don't expand the extension if it's still in wire format, instead use the buffer content.
+				if lb := x.lazyBuffer(); lb != nil {
+					b = append(b, lb...)
+					continue
+				}
+			}
 			b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
 		}
 		return b, err
@@ -191,6 +298,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
 		for _, k := range keys {
 			x := (*ext)[int32(k)]
 			xi := getExtensionFieldInfo(x.Type())
+			if fullyLazyExtensions(opts) {
+				// Don't expand the extension if it's still in wire format, instead use the buffer content.
+				if lb := x.lazyBuffer(); lb != nil {
+					b = append(b, lb...)
+					continue
+				}
+			}
 			b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
 			if err != nil {
 				return b, err
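
The size-cache hunks above switch the stored value from size to size+1, so that the zero value means "nothing cached" while a genuine size of 0 can still be cached. A self-contained sketch of that convention follows; sizeCache here is a stand-in type, not the vendored MessageInfo code.

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// sizeCache stores size+1: the zero value means "no cached size", while a
// real size of 0 remains representable. Illustration only.
type sizeCache struct{ v int32 }

func (c *sizeCache) store(size int) {
	if size > math.MaxInt32-1 {
		// Too large for the +1 encoding: leave the cache empty so the
		// size is recomputed at encoding time.
		atomic.StoreInt32(&c.v, 0)
		return
	}
	atomic.StoreInt32(&c.v, int32(size+1))
}

func (c *sizeCache) load() (size int, ok bool) {
	if v := atomic.LoadInt32(&c.v); v > 0 {
		return int(v - 1), true
	}
	return 0, false
}

func main() {
	var c sizeCache
	fmt.Println(c.load()) // 0 false: nothing cached yet
	c.store(0)
	fmt.Println(c.load()) // 0 true: a zero size is now a valid cache entry
}
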
diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
new file mode 100644
index 00000000..9f6c32a7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go
@@ -0,0 +1,224 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"bytes"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
+)
+
+func equal(in protoiface.EqualInput) protoiface.EqualOutput {
+	return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
+}
+
+// equalMessage is a fast-path variant of protoreflect.equalMessage.
+// It takes advantage of the internal messageState type to avoid
+// unnecessary allocations and type assertions.
+func equalMessage(mx, my protoreflect.Message) bool {
+	if mx == nil || my == nil {
+		return mx == my
+	}
+	if mx.Descriptor() != my.Descriptor() {
+		return false
+	}
+
+	msx, ok := mx.(*messageState)
+	if !ok {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+	msy, ok := my.(*messageState)
+	if !ok {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+
+	mi := msx.messageInfo()
+	miy := msy.messageInfo()
+	if mi != miy {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+	mi.init()
+	// Compares regular fields
+	// Modified Message.Range code that compares two messages of the same type
+	// while going over the fields.
+	for _, ri := range mi.rangeInfos {
+		var fd protoreflect.FieldDescriptor
+		var vx, vy protoreflect.Value
+
+		switch ri := ri.(type) {
+		case *fieldInfo:
+			hx := ri.has(msx.pointer())
+			hy := ri.has(msy.pointer())
+			if hx != hy {
+				return false
+			}
+			if !hx {
+				continue
+			}
+			fd = ri.fieldDesc
+			vx = ri.get(msx.pointer())
+			vy = ri.get(msy.pointer())
+		case *oneofInfo:
+			fnx := ri.which(msx.pointer())
+			fny := ri.which(msy.pointer())
+			if fnx != fny {
+				return false
+			}
+			if fnx <= 0 {
+				continue
+			}
+			fi := mi.fields[fnx]
+			fd = fi.fieldDesc
+			vx = fi.get(msx.pointer())
+			vy = fi.get(msy.pointer())
+		}
+
+		if !equalValue(fd, vx, vy) {
+			return false
+		}
+	}
+
+	// Compare extensions.
+	// This is more complicated because mx or my could have empty/nil extension maps,
+	// however some populated extension map values are equal to nil extension maps.
+	emx := mi.extensionMap(msx.pointer())
+	emy := mi.extensionMap(msy.pointer())
+	if emx != nil {
+		for k, x := range *emx {
+			xd := x.Type().TypeDescriptor()
+			xv := x.Value()
+			var y ExtensionField
+			ok := false
+			if emy != nil {
+				y, ok = (*emy)[k]
+			}
+			// We need to treat empty lists as equal to nil values
+			if emy == nil || !ok {
+				if xd.IsList() && xv.List().Len() == 0 {
+					continue
+				}
+				return false
+			}
+
+			if !equalValue(xd, xv, y.Value()) {
+				return false
+			}
+		}
+	}
+	if emy != nil {
+		// emy may have extensions emx does not have, need to check them as well
+		for k, y := range *emy {
+			if emx != nil {
+				// emx has the field, so we already checked it
+				if _, ok := (*emx)[k]; ok {
+					continue
+				}
+			}
+			// Empty lists are equal to nil
+			if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+				continue
+			}
+
+			// Can't be equal if the extension is populated
+			return false
+		}
+	}
+
+	return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+	// slow path
+	if fd.Kind() != protoreflect.MessageKind {
+		return vx.Equal(vy)
+	}
+
+	// fast path special cases
+	if fd.IsMap() {
+		if fd.MapValue().Kind() == protoreflect.MessageKind {
+			return equalMessageMap(vx.Map(), vy.Map())
+		}
+		return vx.Equal(vy)
+	}
+
+	if fd.IsList() {
+		return equalMessageList(vx.List(), vy.List())
+	}
+
+	return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for messages as map types.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+	if mx.Len() != my.Len() {
+		return false
+	}
+	equal := true
+	mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+		if !my.Has(k) {
+			equal = false
+			return false
+		}
+		vy := my.Get(k)
+		equal = equalMessage(vx.Message(), vy.Message())
+		return equal
+	})
+	return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the usage of equalImpl instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+	if lx.Len() != ly.Len() {
+		return false
+	}
+	for i := 0; i < lx.Len(); i++ {
+		// We only operate on messages here since equalImpl will not call us in any other case.
+		if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+			return false
+		}
+	}
+	return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+	if len(x) != len(y) {
+		return false
+	}
+	if bytes.Equal([]byte(x), []byte(y)) {
+		return true
+	}
+
+	mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+	my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+	for len(x) > 0 {
+		fnum, _, n := protowire.ConsumeField(x)
+		mx[fnum] = append(mx[fnum], x[:n]...)
+		x = x[n:]
+	}
+	for len(y) > 0 {
+		fnum, _, n := protowire.ConsumeField(y)
+		my[fnum] = append(my[fnum], y[:n]...)
+		y = y[n:]
+	}
+	if len(mx) != len(my) {
+		return false
+	}
+
+	for k, v1 := range mx {
+		if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
+			return false
+		}
+	}
+
+	return true
+}
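
equalUnknown above compares unknown fields order-insensitively per field number: a byte-for-byte check first, then the raw fields are grouped by number and the grouped bytes compared. The sketch below is a rough standalone equivalent built on the public protowire package, not the vendored helper itself.

package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// unknownFieldsEqual groups raw unknown fields by field number and compares
// the grouped bytes, so differently ordered but equivalent unknown sections
// compare equal.
func unknownFieldsEqual(x, y []byte) bool {
	if bytes.Equal(x, y) {
		return true
	}
	group := func(b []byte) map[protowire.Number][]byte {
		m := map[protowire.Number][]byte{}
		for len(b) > 0 {
			num, _, n := protowire.ConsumeField(b)
			if n < 0 {
				return nil // malformed input
			}
			m[num] = append(m[num], b[:n]...)
			b = b[n:]
		}
		return m
	}
	mx, my := group(x), group(y)
	if len(mx) != len(my) {
		return false
	}
	for k, v := range mx {
		if !bytes.Equal(v, my[k]) {
			return false
		}
	}
	return true
}

func main() {
	a := protowire.AppendVarint(protowire.AppendTag(nil, 1, protowire.VarintType), 7)
	b := protowire.AppendVarint(protowire.AppendTag(nil, 2, protowire.VarintType), 9)
	fmt.Println(unknownFieldsEqual(append(a, b...), append(b, a...))) // true
}
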
diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
index cb25b0ba..e31249f6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
@@ -53,7 +53,7 @@ type ExtensionInfo struct {
 	// type returned by InterfaceOf may not be identical.
 	//
 	// Deprecated: Use InterfaceOf(xt.Zero()) instead.
-	ExtensionType interface{}
+	ExtensionType any
 
 	// Field is the field number of the extension.
 	//
@@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value {
 func (xi *ExtensionInfo) Zero() protoreflect.Value {
 	return xi.lazyInit().Zero()
 }
-func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value {
+func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value {
 	return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
 }
-func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} {
+func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any {
 	return xi.lazyInit().GoValueOf(v).Interface()
 }
 func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool {
 	return xi.lazyInit().IsValidPB(v)
 }
-func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
+func (xi *ExtensionInfo) IsValidInterface(v any) bool {
 	return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
 }
 func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
new file mode 100644
index 00000000..c7de31e2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
@@ -0,0 +1,433 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"math/bits"
+	"os"
+	"reflect"
+	"sort"
+	"sync/atomic"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/protolazy"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	preg "google.golang.org/protobuf/reflect/protoregistry"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+var enableLazy int32 = func() int32 {
+	if os.Getenv("GOPROTODEBUG") == "nolazy" {
+		return 0
+	}
+	return 1
+}()
+
+// EnableLazyUnmarshal enables lazy unmarshaling.
+func EnableLazyUnmarshal(enable bool) {
+	if enable {
+		atomic.StoreInt32(&enableLazy, 1)
+		return
+	}
+	atomic.StoreInt32(&enableLazy, 0)
+}
+
+// LazyEnabled reports whether lazy unmarshalling is currently enabled.
+func LazyEnabled() bool {
+	return atomic.LoadInt32(&enableLazy) != 0
+}
+
+// UnmarshalField unmarshals a field in a message.
+func UnmarshalField(m interface{}, num protowire.Number) {
+	switch m := m.(type) {
+	case *messageState:
+		m.messageInfo().lazyUnmarshal(m.pointer(), num)
+	case *messageReflectWrapper:
+		m.messageInfo().lazyUnmarshal(m.pointer(), num)
+	default:
+		panic(fmt.Sprintf("unsupported wrapper type %T", m))
+	}
+}
+
+func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
+	var f *coderFieldInfo
+	if int(num) < len(mi.denseCoderFields) {
+		f = mi.denseCoderFields[num]
+	} else {
+		f = mi.coderFields[num]
+	}
+	if f == nil {
+		panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
+	}
+	lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
+	if !found && multipleEntries == nil {
+		panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
+	}
+	// The actual pointer in the message cannot be set until the whole struct is filled in; otherwise we will have races.
+	// Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil.
+	fp := pointerOfValue(reflect.New(f.ft))
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
+		}
+	} else {
+		mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
+	}
+	p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
+}
+
+func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
+	opts := lazyUnmarshalOptions
+	opts.flags |= flags
+	for len(b) > 0 {
+		// Parse the tag (field number and wire type).
+		var tag uint64
+		if b[0] < 0x80 {
+			tag = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			tag, n = protowire.ConsumeVarint(b)
+			if n < 0 {
+				return errors.New("invalid wire data")
+			}
+			b = b[n:]
+		}
+		var num protowire.Number
+		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+			return errors.New("invalid wire data")
+		} else {
+			num = protowire.Number(n)
+		}
+		wtyp := protowire.Type(tag & 7)
+		if num == f.num {
+			o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
+			if err == nil {
+				b = b[o.n:]
+				continue
+			}
+			if err != errUnknown {
+				return err
+			}
+		}
+		n := protowire.ConsumeFieldValue(num, wtyp, b)
+		if n < 0 {
+			return errors.New("invalid wire data")
+		}
+		b = b[n:]
+	}
+	return nil
+}
+
+func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+	fmi := f.validation.mi
+	if fmi == nil {
+		fd := mi.Desc.Fields().ByNumber(f.num)
+		if fd == nil {
+			return out, ValidationUnknown
+		}
+		messageName := fd.Message().FullName()
+		messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+		if err != nil {
+			return out, ValidationUnknown
+		}
+		var ok bool
+		fmi, ok = messageType.(*MessageInfo)
+		if !ok {
+			return out, ValidationUnknown
+		}
+	}
+	fmi.init()
+	switch f.validation.typ {
+	case validationTypeMessage:
+		if wtyp != protowire.BytesType {
+			return out, ValidationWrongWireType
+		}
+		v, n := protowire.ConsumeBytes(b)
+		if n < 0 {
+			return out, ValidationInvalid
+		}
+		out, st := fmi.validate(v, 0, opts)
+		out.n = n
+		return out, st
+	case validationTypeGroup:
+		if wtyp != protowire.StartGroupType {
+			return out, ValidationWrongWireType
+		}
+		out, st := fmi.validate(b, f.num, opts)
+		return out, st
+	default:
+		return out, ValidationUnknown
+	}
+}
+
+// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
+// specifically handles lazy unmarshalling.  it expects lazyOffset and
+// presenceOffset to both be valid.
+func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	initialized := true
+	var requiredMask uint64
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	var lazyIndex []protolazy.IndexEntry
+	var lastNum protowire.Number
+	outOfOrder := false
+	lazyDecode := false
+	presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+	if !presence.AnyPresent(mi.presenceSize) {
+		if opts.CanBeLazy() {
+			// If the message contains existing data, we need to merge into it.
+			// Lazy unmarshaling doesn't merge, so only enable it when the
+			// message is empty (has no presence bitmap).
+			lazyDecode = true
+			if *lazy == nil {
+				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+			}
+			(*lazy).SetUnmarshalFlags(opts.flags)
+			if !opts.AliasBuffer() {
+				// Make a copy of the buffer for lazy unmarshaling.
+				// Set the AliasBuffer flag so recursive unmarshal
+				// operations reuse the copy.
+				b = append([]byte{}, b...)
+				opts.flags |= piface.UnmarshalAliasBuffer
+			}
+			(*lazy).SetBuffer(b)
+		}
+	}
+	// Track special handling of lazy fields.
+	//
+	// In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
+	// In the event that validation for a field fails, this map tracks handling of the field.
+	type lazyAction uint8
+	const (
+		lazyValidateOnly   lazyAction = iota // validate the field only
+		lazyUnmarshalNow                     // eagerly unmarshal the field
+		lazyUnmarshalLater                   // unmarshal the field after the message is fully processed
+	)
+	var lazyFields map[*coderFieldInfo]lazyAction
+	var exts *map[int32]ExtensionField
+	start := len(b)
+	pos := 0
+	for len(b) > 0 {
+		// Parse the tag (field number and wire type).
+		var tag uint64
+		if b[0] < 0x80 {
+			tag = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			tag, n = protowire.ConsumeVarint(b)
+			if n < 0 {
+				return out, errDecode
+			}
+			b = b[n:]
+		}
+		var num protowire.Number
+		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+			return out, errors.New("invalid field number")
+		} else {
+			num = protowire.Number(n)
+		}
+		wtyp := protowire.Type(tag & 7)
+
+		if wtyp == protowire.EndGroupType {
+			if num != groupTag {
+				return out, errors.New("mismatching end group marker")
+			}
+			groupTag = 0
+			break
+		}
+
+		var f *coderFieldInfo
+		if int(num) < len(mi.denseCoderFields) {
+			f = mi.denseCoderFields[num]
+		} else {
+			f = mi.coderFields[num]
+		}
+		var n int
+		err := errUnknown
+		discardUnknown := false
+	Field:
+		switch {
+		case f != nil:
+			if f.funcs.unmarshal == nil {
+				break
+			}
+			if f.isLazy && lazyDecode {
+				switch {
+				case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
+					// Attempt to validate this field and leave it for later lazy unmarshaling.
+					o, valid := mi.skipField(b, f, wtyp, opts)
+					switch valid {
+					case ValidationValid:
+						// Skip over the valid field and continue.
+						err = nil
+						presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+						requiredMask |= f.validation.requiredBit
+						if !o.initialized {
+							initialized = false
+						}
+						n = o.n
+						break Field
+					case ValidationInvalid:
+						return out, errors.New("invalid proto wire format")
+					case ValidationWrongWireType:
+						break Field
+					case ValidationUnknown:
+						if lazyFields == nil {
+							lazyFields = make(map[*coderFieldInfo]lazyAction)
+						}
+						if presence.Present(f.presenceIndex) {
+							// We were unable to determine if the field is valid or not,
+							// and we've already skipped over at least one instance of this
+							// field. Clear the presence bit (so if we stop decoding early,
+							// we don't leave a partially-initialized field around) and flag
+							// the field for unmarshaling before we return.
+							presence.ClearPresent(f.presenceIndex)
+							lazyFields[f] = lazyUnmarshalLater
+							discardUnknown = true
+							break Field
+						} else {
+							// We were unable to determine if the field is valid or not,
+							// but this is the first time we've seen it. Flag it as needing
+							// eager unmarshaling and fall through to the eager unmarshal case below.
+							lazyFields[f] = lazyUnmarshalNow
+						}
+					}
+				case lazyFields[f] == lazyUnmarshalLater:
+					// This field will be unmarshaled in a separate pass below.
+					// Skip over it here.
+					discardUnknown = true
+					break Field
+				default:
+					// Eagerly unmarshal the field.
+				}
+			}
+			if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
+				if p.Apply(f.offset).AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(p, f.num)
+				}
+			}
+			var o unmarshalOutput
+			o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
+			n = o.n
+			if err != nil {
+				break
+			}
+			requiredMask |= f.validation.requiredBit
+			if f.funcs.isInit != nil && !o.initialized {
+				initialized = false
+			}
+			if f.presenceIndex != noPresence {
+				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			}
+		default:
+			// Possible extension.
+			if exts == nil && mi.extensionOffset.IsValid() {
+				exts = p.Apply(mi.extensionOffset).Extensions()
+				if *exts == nil {
+					*exts = make(map[int32]ExtensionField)
+				}
+			}
+			if exts == nil {
+				break
+			}
+			var o unmarshalOutput
+			o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
+			if err != nil {
+				break
+			}
+			n = o.n
+			if !o.initialized {
+				initialized = false
+			}
+		}
+		if err != nil {
+			if err != errUnknown {
+				return out, err
+			}
+			n = protowire.ConsumeFieldValue(num, wtyp, b)
+			if n < 0 {
+				return out, errDecode
+			}
+			if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
+				u := mi.mutableUnknownBytes(p)
+				*u = protowire.AppendTag(*u, num, wtyp)
+				*u = append(*u, b[:n]...)
+			}
+		}
+		b = b[n:]
+		end := start - len(b)
+		if lazyDecode && f != nil && f.isLazy {
+			if num != lastNum {
+				lazyIndex = append(lazyIndex, protolazy.IndexEntry{
+					FieldNum: uint32(num),
+					Start:    uint32(pos),
+					End:      uint32(end),
+				})
+			} else {
+				i := len(lazyIndex) - 1
+				lazyIndex[i].End = uint32(end)
+				lazyIndex[i].MultipleContiguous = true
+			}
+		}
+		if num < lastNum {
+			outOfOrder = true
+		}
+		pos = end
+		lastNum = num
+	}
+	if groupTag != 0 {
+		return out, errors.New("missing end group marker")
+	}
+	if lazyFields != nil {
+		// Some fields failed validation, and now need to be unmarshaled.
+		for f, action := range lazyFields {
+			if action != lazyUnmarshalLater {
+				continue
+			}
+			initialized = false
+			if *lazy == nil {
+				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+			}
+			if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
+				return out, err
+			}
+			presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+		}
+	}
+	if lazyDecode {
+		if outOfOrder {
+			sort.Slice(lazyIndex, func(i, j int) bool {
+				return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
+					(lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
+						lazyIndex[i].Start < lazyIndex[j].Start)
+			})
+		}
+		if *lazy == nil {
+			*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+		}
+
+		(*lazy).SetIndex(lazyIndex)
+	}
+	if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
+		initialized = false
+	}
+	if initialized {
+		out.initialized = true
+	}
+	out.n = start - len(b)
+	return out, nil
+}
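
Both unmarshalField and unmarshalPointerLazy start their decode loops with the same inline fast path: one- and two-byte tags are decoded directly, and everything longer falls back to protowire.ConsumeVarint. The sketch below shows the fast path and the general decoder agreeing on a two-byte tag; consumeTag is an illustrative stand-in, not the vendored loop.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// consumeTag decodes a field tag, taking the same shortcuts as the vendored
// lazy decode loops: handle one- and two-byte varints inline, otherwise use
// the general varint decoder.
func consumeTag(b []byte) (tag uint64, rest []byte, ok bool) {
	switch {
	case len(b) > 0 && b[0] < 0x80:
		return uint64(b[0]), b[1:], true
	case len(b) >= 2 && b[1] < 128:
		return uint64(b[0]&0x7f) + uint64(b[1])<<7, b[2:], true
	default:
		v, n := protowire.ConsumeVarint(b)
		if n < 0 {
			return 0, b, false
		}
		return v, b[n:], true
	}
}

func main() {
	// Field number 300 with the varint wire type needs a two-byte tag.
	b := protowire.AppendTag(nil, 300, protowire.VarintType)
	tag, _, ok := consumeTag(b)
	want, _ := protowire.ConsumeVarint(b)
	fmt.Println(ok, tag == want)                                  // true true
	fmt.Println(protowire.Number(tag>>3), protowire.Type(tag&7)) // 300 0
}
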
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
index c1c33d00..81b2b1a7 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
@@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber {
 func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum {
 	return e
 }
-func (e *legacyEnumWrapper) protoUnwrap() interface{} {
+func (e *legacyEnumWrapper) protoUnwrap() any {
 	v := reflect.New(e.goTyp).Elem()
 	v.SetInt(int64(e.num))
 	return v.Interface()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 6e8677ee..b6849d66 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
 func (x placeholderExtension) HasOptionalKeyword() bool                           { return false }
 func (x placeholderExtension) IsExtension() bool                                  { return true }
 func (x placeholderExtension) IsWeak() bool                                       { return false }
+func (x placeholderExtension) IsLazy() bool                                       { return false }
 func (x placeholderExtension) IsPacked() bool                                     { return false }
 func (x placeholderExtension) IsList() bool                                       { return false }
 func (x placeholderExtension) IsMap() bool                                        { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 950e9a1f..a51dffbe 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -216,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
 	}
 	for _, fn := range methods {
 		for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
-			if vs, ok := v.Interface().([]interface{}); ok {
+			if vs, ok := v.Interface().([]any); ok {
 				for _, v := range vs {
 					oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
 				}
@@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
 	fd.L0.Parent = md
 	fd.L0.Index = n
 
-	if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked {
+	if fd.L1.EditionFeatures.IsPacked {
 		fd.L1.Options = func() protoreflect.ProtoMessage {
 			opts := descopts.Field.ProtoReflect().New()
-			if fd.L1.IsWeak {
-				opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
-			}
 			if fd.L1.EditionFeatures.IsPacked {
 				opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
 			}
@@ -567,6 +564,6 @@ func (m aberrantMessage) IsValid() bool {
 func (m aberrantMessage) ProtoMethods() *protoiface.Methods {
 	return aberrantProtoMethods
 }
-func (m aberrantMessage) protoUnwrap() interface{} {
+func (m aberrantMessage) protoUnwrap() any {
 	return m.v.Interface()
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index 7e65f64f..8ffdce67 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
 	if src.IsNil() {
 		return
 	}
+
+	var presenceSrc presence
+	var presenceDst presence
+	if mi.presenceOffset.IsValid() {
+		presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
+		presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.merge == nil {
 			continue
 		}
 		sfptr := src.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presenceSrc.Present(f.presenceIndex) {
+				continue
+			}
+			dfptr := dst.Apply(f.offset)
+			if f.isLazy {
+				if sfptr.AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(src, f.num)
+				}
+				if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(dst, f.num)
+				}
+			}
+			f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
+			presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			continue
+		}
+
 		if f.isPointer && sfptr.Elem().IsNil() {
 			continue
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 629bacdc..d50423dc 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -14,7 +14,6 @@ import (
 
 	"google.golang.org/protobuf/internal/genid"
 	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
 )
 
 // MessageInfo provides protobuf related functionality for a given Go type
@@ -30,12 +29,12 @@ type MessageInfo struct {
 	// Desc is the underlying message descriptor type and must be populated.
 	Desc protoreflect.MessageDescriptor
 
-	// Exporter must be provided in a purego environment in order to provide
-	// access to unexported fields.
+	// Deprecated: Exporter will be removed the next time we bump
+	// protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
 	Exporter exporter
 
 	// OneofWrappers is list of pointers to oneof wrapper struct types.
-	OneofWrappers []interface{}
+	OneofWrappers []any
 
 	initMu   sync.Mutex // protects all unexported fields
 	initDone uint32
@@ -47,7 +46,7 @@ type MessageInfo struct {
 // exporter is a function that returns a reference to the ith field of v,
 // where v is a pointer to a struct. It returns nil if it does not support
 // exporting the requested field (e.g., already exported).
-type exporter func(v interface{}, i int) interface{}
+type exporter func(v any, i int) any
 
 // getMessageInfo returns the MessageInfo for any message type that
 // is generated by our implementation of protoc-gen-go (for v2 and on).
@@ -79,6 +78,9 @@ func (mi *MessageInfo) initOnce() {
 	if mi.initDone == 1 {
 		return
 	}
+	if opaqueInitHook(mi) {
+		return
+	}
 
 	t := mi.GoReflectType
 	if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
@@ -117,7 +119,6 @@ type (
 
 var (
 	sizecacheType       = reflect.TypeOf(SizeCache(0))
-	weakFieldsType      = reflect.TypeOf(WeakFields(nil))
 	unknownFieldsAType  = reflect.TypeOf(unknownFieldsA(nil))
 	unknownFieldsBType  = reflect.TypeOf(unknownFieldsB(nil))
 	extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
@@ -126,13 +127,14 @@ var (
 type structInfo struct {
 	sizecacheOffset offset
 	sizecacheType   reflect.Type
-	weakOffset      offset
-	weakType        reflect.Type
 	unknownOffset   offset
 	unknownType     reflect.Type
 	extensionOffset offset
 	extensionType   reflect.Type
 
+	lazyOffset     offset
+	presenceOffset offset
+
 	fieldsByNumber        map[protoreflect.FieldNumber]reflect.StructField
 	oneofsByName          map[protoreflect.Name]reflect.StructField
 	oneofWrappersByType   map[reflect.Type]protoreflect.FieldNumber
@@ -142,9 +144,10 @@ type structInfo struct {
 func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
 	si := structInfo{
 		sizecacheOffset: invalidOffset,
-		weakOffset:      invalidOffset,
 		unknownOffset:   invalidOffset,
 		extensionOffset: invalidOffset,
+		lazyOffset:      invalidOffset,
+		presenceOffset:  invalidOffset,
 
 		fieldsByNumber:        map[protoreflect.FieldNumber]reflect.StructField{},
 		oneofsByName:          map[protoreflect.Name]reflect.StructField{},
@@ -157,24 +160,23 @@ fieldLoop:
 		switch f := t.Field(i); f.Name {
 		case genid.SizeCache_goname, genid.SizeCacheA_goname:
 			if f.Type == sizecacheType {
-				si.sizecacheOffset = offsetOf(f, mi.Exporter)
+				si.sizecacheOffset = offsetOf(f)
 				si.sizecacheType = f.Type
 			}
-		case genid.WeakFields_goname, genid.WeakFieldsA_goname:
-			if f.Type == weakFieldsType {
-				si.weakOffset = offsetOf(f, mi.Exporter)
-				si.weakType = f.Type
-			}
 		case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
 			if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
-				si.unknownOffset = offsetOf(f, mi.Exporter)
+				si.unknownOffset = offsetOf(f)
 				si.unknownType = f.Type
 			}
 		case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
 			if f.Type == extensionFieldsType {
-				si.extensionOffset = offsetOf(f, mi.Exporter)
+				si.extensionOffset = offsetOf(f)
 				si.extensionType = f.Type
 			}
+		case "lazyFields", "XXX_lazyUnmarshalInfo":
+			si.lazyOffset = offsetOf(f)
+		case "XXX_presence":
+			si.presenceOffset = offsetOf(f)
 		default:
 			for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
 				if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
@@ -201,7 +203,7 @@ fieldLoop:
 	}
 	for _, fn := range methods {
 		for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
-			if vs, ok := v.Interface().([]interface{}); ok {
+			if vs, ok := v.Interface().([]any); ok {
 				oneofWrappers = vs
 			}
 		}
@@ -244,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
 	mi.init()
 	fd := mi.Desc.Fields().Get(i)
 	switch {
-	case fd.IsWeak():
-		mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName())
-		return mt
 	case fd.IsMap():
 		return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
 	default:
@@ -256,7 +255,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
 
 type mapEntryType struct {
 	desc    protoreflect.MessageDescriptor
-	valType interface{} // zero value of enum or message type
+	valType any // zero value of enum or message type
 }
 
 func (mt mapEntryType) New() protoreflect.Message {
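
makeStructInfo now records offsets for the opaque-API bookkeeping fields by name ("lazyFields", "XXX_lazyUnmarshalInfo", "XXX_presence") in place of the removed weak-fields slot. The toy reflection scan below shows the kind of by-name lookup involved; fakeOpaqueMessage is a made-up stand-in, not a real generated message.

package main

import (
	"fmt"
	"reflect"
)

// fakeOpaqueMessage is a hypothetical struct whose bookkeeping field names
// match the ones the vendored scan looks for.
type fakeOpaqueMessage struct {
	XXX_presence          [1]uint32
	XXX_lazyUnmarshalInfo any
	xxx_hidden_Value      int32
}

func main() {
	t := reflect.TypeOf(fakeOpaqueMessage{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		switch f.Name {
		case "XXX_presence", "XXX_lazyUnmarshalInfo", "lazyFields":
			// makeStructInfo stores these offsets in structInfo.
			fmt.Printf("%-22s at offset %d\n", f.Name, f.Offset)
		}
	}
}
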
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
new file mode 100644
index 00000000..dd55e8e0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -0,0 +1,627 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+	"sync/atomic"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type opaqueStructInfo struct {
+	structInfo
+}
+
+// isOpaque determines whether a protobuf message type is on the Opaque API.  It
+// checks whether the type is a Go struct that protoc-gen-go would generate.
+//
+// This function only detects newly generated messages from the v2
+// implementation of protoc-gen-go. It is unable to classify generated messages
+// that are too old or those that are generated by a different generator
+// such as protoc-gen-gogo.
+func isOpaque(t reflect.Type) bool {
+	// The current detection mechanism is to simply check the first field
+	// for a struct tag with the "protogen" key.
+	if t.Kind() == reflect.Struct && t.NumField() > 0 {
+		pgt := t.Field(0).Tag.Get("protogen")
+		return strings.HasPrefix(pgt, "opaque.")
+	}
+	return false
+}
+
+func opaqueInitHook(mi *MessageInfo) bool {
+	mt := mi.GoReflectType.Elem()
+	si := opaqueStructInfo{
+		structInfo: mi.makeStructInfo(mt),
+	}
+
+	if !isOpaque(mt) {
+		return false
+	}
+
+	defer atomic.StoreUint32(&mi.initDone, 1)
+
+	mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
+	fds := mi.Desc.Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		fs := si.fieldsByNumber[fd.Number()]
+		var fi fieldInfo
+		usePresence, _ := usePresenceForField(si, fd)
+
+		switch {
+		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+			// Oneofs are no different for opaque.
+			fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+		case fd.IsMap():
+			fi = mi.fieldInfoForMapOpaque(si, fd, fs)
+		case fd.IsList() && fd.Message() == nil && usePresence:
+			fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
+		case fd.IsList() && fd.Message() == nil:
+			// Proto3 lists without presence can use same access methods as open
+			fi = fieldInfoForList(fd, fs, mi.Exporter)
+		case fd.IsList() && usePresence:
+			fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
+		case fd.IsList():
+			// Proto3 opaque messages that do not need a presence bitmap.
+			// Different representation than open struct, but same logic
+			fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
+		case fd.Message() != nil && usePresence:
+			fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
+		case fd.Message() != nil:
+			// Proto3 messages without presence can use same access methods as open
+			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+		default:
+			fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
+		}
+		mi.fields[fd.Number()] = &fi
+	}
+	mi.oneofs = map[protoreflect.Name]*oneofInfo{}
+	for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
+		od := mi.Desc.Oneofs().Get(i)
+		mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
+	}
+
+	mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+	for i := 0; i < fds.Len(); i++ {
+		if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+			mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+		}
+	}
+
+	for i := 0; i < fds.Len(); {
+		fd := fds.Get(i)
+		if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
+			mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+			i += od.Fields().Len()
+		} else {
+			mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+			i++
+		}
+	}
+
+	mi.makeExtensionFieldsFunc(mt, si.structInfo)
+	mi.makeUnknownFieldsFunc(mt, si.structInfo)
+	mi.makeOpaqueCoderMethods(mt, si)
+	mi.makeFieldTypes(si.structInfo)
+
+	return true
+}
+
+func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+	oi := &oneofInfo{oneofDesc: od}
+	if od.IsSynthetic() {
+		fd := od.Fields().Get(0)
+		index, _ := presenceIndex(mi.Desc, fd)
+		oi.which = func(p pointer) protoreflect.FieldNumber {
+			if p.IsNil() {
+				return 0
+			}
+			if !mi.present(p, index) {
+				return 0
+			}
+			return od.Fields().Get(0).Number()
+		}
+		return oi
+	}
+	// Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
+	return makeOneofInfo(od, si, x)
+}
+
+func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Map {
+		panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
+	}
+	fieldOffset := offsetOf(fs)
+	conv := NewConverter(ft, fd)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			// Don't bother checking presence bits, since we need to
+			// look at the map length even if the presence bit is set.
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return rv.Len() > 0
+		},
+		clear: func(p pointer) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			pv := conv.GoValueOf(v)
+			if pv.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(pv)
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if v.IsNil() {
+				v.Set(reflect.MakeMap(fs.Type))
+			}
+			return conv.PBValueOf(v)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(reflect.PtrTo(ft), fd)
+	fieldOffset := offsetOf(fs)
+	index, _ := presenceIndex(mi.Desc, fd)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return rv.Len() > 0
+		},
+		clear: func(p pointer) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			pv := conv.GoValueOf(v)
+			if pv.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			}
+			mi.setPresent(p, index)
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(pv.Elem())
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			mi.setPresent(p, index)
+			return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+	index, _ := presenceIndex(mi.Desc, fd)
+	fieldNumber := fd.Number()
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			if !mi.present(p, index) {
+				return false
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				sp = p.Apply(fieldOffset).AtomicGetPointer()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return rv.Elem().Len() > 0
+		},
+		clear: func(p pointer) {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+				mi.setPresent(p, index)
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			if !mi.present(p, index) {
+				return conv.Zero()
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				sp = p.Apply(fieldOffset).AtomicGetPointer()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+				mi.setPresent(p, index)
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			val := conv.GoValueOf(v)
+			if val.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			} else {
+				rv.Elem().Set(val.Elem())
+			}
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				if mi.present(p, index) {
+					// Lazily unmarshal this field.
+					mi.lazyUnmarshal(p, fieldNumber)
+					sp = p.Apply(fieldOffset).AtomicGetPointer()
+				} else {
+					sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+					mi.setPresent(p, index)
+				}
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return conv.PBValueOf(rv)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				return false
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return rv.Elem().Len() > 0
+		},
+		clear: func(p pointer) {
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if !sp.IsNil() {
+				rv := sp.AsValueOf(fs.Type.Elem())
+				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+			}
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				return conv.Zero()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
+				rv.Set(reflect.New(fs.Type.Elem()))
+			}
+			val := conv.GoValueOf(v)
+			if val.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			} else {
+				rv.Elem().Set(val.Elem())
+			}
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
+				rv.Set(reflect.New(fs.Type.Elem()))
+			}
+			return conv.PBValueOf(rv)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	nullable := fd.HasPresence()
+	if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
+		nullable = true
+	}
+	deref := false
+	if nullable && ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+		deref = true
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+	index, _ := presenceIndex(mi.Desc, fd)
+	var getter func(p pointer) protoreflect.Value
+	if !nullable {
+		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+	} else {
+		getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
+	}
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			if nullable {
+				return mi.present(p, index)
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			switch rv.Kind() {
+			case reflect.Bool:
+				return rv.Bool()
+			case reflect.Int32, reflect.Int64:
+				return rv.Int() != 0
+			case reflect.Uint32, reflect.Uint64:
+				return rv.Uint() != 0
+			case reflect.Float32, reflect.Float64:
+				return rv.Float() != 0 || math.Signbit(rv.Float())
+			case reflect.String, reflect.Slice:
+				return rv.Len() > 0
+			default:
+				panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
+			}
+		},
+		clear: func(p pointer) {
+			if nullable {
+				mi.clearPresent(p, index)
+			}
+			// This is only valuable for bytes and strings, but we do it unconditionally.
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: getter,
+		// TODO: Implement unsafe fast path for set?
+		set: func(p pointer, v protoreflect.Value) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if deref {
+				if rv.IsNil() {
+					rv.Set(reflect.New(ft))
+				}
+				rv = rv.Elem()
+			}
+
+			rv.Set(conv.GoValueOf(v))
+			if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
+				rv.Set(emptyBytes)
+			}
+			if nullable {
+				mi.setPresent(p, index)
+			}
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+	index, _ := presenceIndex(mi.Desc, fd)
+	fieldNumber := fd.Number()
+	elemType := fs.Type.Elem()
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			return mi.present(p, index)
+		},
+		clear: func(p pointer) {
+			mi.clearPresent(p, index)
+			p.Apply(fieldOffset).AtomicSetNilPointer()
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			fp := p.Apply(fieldOffset)
+			mp := fp.AtomicGetPointer()
+			if mp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				mp = fp.AtomicGetPointer()
+			}
+			rv := mp.AsValueOf(elemType)
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			val := pointerOfValue(conv.GoValueOf(v))
+			if val.IsNil() {
+				panic("invalid nil pointer")
+			}
+			p.Apply(fieldOffset).AtomicSetPointer(val)
+			mi.setPresent(p, index)
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			fp := p.Apply(fieldOffset)
+			mp := fp.AtomicGetPointer()
+			if mp.IsNil() {
+				if mi.present(p, index) {
+					// Lazily unmarshal this field.
+					mi.lazyUnmarshal(p, fieldNumber)
+					mp = fp.AtomicGetPointer()
+				} else {
+					mp = pointerOfValue(conv.GoValueOf(conv.New()))
+					fp.AtomicSetPointer(mp)
+					mi.setPresent(p, index)
+				}
+			}
+			return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
+		},
+		newMessage: func() protoreflect.Message {
+			return conv.New().Message()
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+// A presenceList wraps a List, updating presence bits as necessary when the
+// list contents change.
+type presenceList struct {
+	pvalueList
+	setPresence func(bool)
+}
+type pvalueList interface {
+	protoreflect.List
+	//Unwrapper
+}
+
+func (list presenceList) Append(v protoreflect.Value) {
+	list.pvalueList.Append(v)
+	list.setPresence(true)
+}
+func (list presenceList) Truncate(i int) {
+	list.pvalueList.Truncate(i)
+	list.setPresence(i > 0)
+}
+
+// presenceIndex returns the index to pass to presence functions.
+//
+// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
+func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
+	found := false
+	var index, numIndices uint32
+	for i := 0; i < md.Fields().Len(); i++ {
+		f := md.Fields().Get(i)
+		if f == fd {
+			found = true
+			index = numIndices
+		}
+		if f.ContainingOneof() == nil || isLastOneofField(f) {
+			numIndices++
+		}
+	}
+	if !found {
+		panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
+	}
+	return index, presenceSize(numIndices)
+}
+
+func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
+	fields := fd.ContainingOneof().Fields()
+	return fields.Get(fields.Len()-1) == fd
+}
+
+func (mi *MessageInfo) setPresent(p pointer, index uint32) {
+	p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
+}
+
+func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
+	p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
+}
+
+func (mi *MessageInfo) present(p pointer, index uint32) bool {
+	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
+}
+
+// usePresenceForField implements the somewhat intricate logic of when
+// the presence bitmap is used for a field.  The main logic is that a
+// field that is optional or that can be lazy will use the presence
+// bit, but for proto2, also maps have a presence bit. It also records
+// if the field can ever be lazy, which is true if we have a
+// lazyOffset and the field is a message or a slice of messages. A
+// field that is lazy will always need a presence bit.  Oneofs are not
+// lazy and do not use presence, unless they are a synthetic oneof,
+// which is a proto3 optional field. For proto3 optionals, we use the
+// presence and they can also be lazy when applicable (a message).
+func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
+
+	// Non-oneof scalar fields with explicit field presence use the presence array.
+	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
+	switch {
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+		return false, false
+	case fd.IsMap():
+		return false, false
+	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+		return hasLazyField, hasLazyField
+	default:
+		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
+	}
+}
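
presenceIndex above gives each regular field its own slot in the presence bitmap, while all members of a oneof share one slot (the counter only advances at the oneof's last member). A minimal, self-contained sketch of that numbering, using a hypothetical field layout instead of real descriptors:

package main

import "fmt"

type fakeField struct {
	name      string
	inOneof   bool // member of a oneof
	lastOneof bool // last member of its oneof
}

func main() {
	fields := []fakeField{
		{name: "id"},
		{name: "kind_a", inOneof: true},
		{name: "kind_b", inOneof: true, lastOneof: true},
		{name: "payload"},
	}
	var next uint32
	for _, f := range fields {
		fmt.Printf("%-8s -> presence index %d\n", f.name, next)
		if !f.inOneof || f.lastOneof {
			next++
		}
	}
	// id -> 0, kind_a -> 1, kind_b -> 1, payload -> 2
}
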
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
new file mode 100644
index 00000000..a6982569
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+	}
+	if fd.Kind() == protoreflect.EnumKind {
+		// Enums for nullable opaque types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return conv.PBValueOf(rv)
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bool()
+			return protoreflect.ValueOfBool(*x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32()
+			return protoreflect.ValueOfInt32(*x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32()
+			return protoreflect.ValueOfUint32(*x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64()
+			return protoreflect.ValueOfInt64(*x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64()
+			return protoreflect.ValueOfUint64(*x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32()
+			return protoreflect.ValueOfFloat32(*x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64()
+			return protoreflect.ValueOfFloat64(*x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() || !mi.present(p, index) {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).StringPtr()
+				if *x == nil {
+					return conv.Zero()
+				}
+				if len(**x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(**x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).StringPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfString(**x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() || !mi.present(p, index) {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
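
The opaque nullable getters above consult a presence bit (mi.present) instead of relying on pointer nil-ness the way the open struct API does. A hedged illustration of the two layouts with hypothetical structs, not generated code:

package main

import "fmt"

// openMsg models the open struct API: an optional scalar is a pointer, and
// presence means "the pointer is non-nil".
type openMsg struct {
	Count *int32
}

// opaqueMsg models the layout the opaque getters read from: the value sits
// inline and a separate bitmap word records presence.
type opaqueMsg struct {
	count    int32
	presence uint32 // bit 0 set == count is present
}

func (m *opaqueMsg) HasCount() bool  { return m.presence&1 != 0 }
func (m *opaqueMsg) GetCount() int32 { return m.count }

func main() {
	v := int32(42)
	o := openMsg{Count: &v}
	q := opaqueMsg{count: 42, presence: 1}
	fmt.Println(o.Count != nil, *o.Count)   // true 42
	fmt.Println(q.HasCount(), q.GetCount()) // true 42
}
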
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index a6f0dbda..0d20132f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -20,7 +20,7 @@ type reflectMessageInfo struct {
 	// fieldTypes contains the zero value of an enum or message field.
 	// For lists, it contains the element type.
 	// For maps, it contains the entry value type.
-	fieldTypes map[protoreflect.FieldNumber]interface{}
+	fieldTypes map[protoreflect.FieldNumber]any
 
 	// denseFields is a subset of fields where:
 	//	0 < fieldDesc.Number() < len(denseFields)
@@ -28,7 +28,7 @@ type reflectMessageInfo struct {
 	denseFields []*fieldInfo
 
 	// rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
-	rangeInfos []interface{} // either *fieldInfo or *oneofInfo
+	rangeInfos []any // either *fieldInfo or *oneofInfo
 
 	getUnknown   func(pointer) protoreflect.RawFields
 	setUnknown   func(pointer, protoreflect.RawFields)
@@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
 			fi = fieldInfoForMap(fd, fs, mi.Exporter)
 		case fd.IsList():
 			fi = fieldInfoForList(fd, fs, mi.Exporter)
-		case fd.IsWeak():
-			fi = fieldInfoForWeakMessage(fd, si.weakOffset)
 		case fd.Message() != nil:
 			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
 		default:
@@ -205,6 +203,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
 		case fd.IsList():
 			if fd.Enum() != nil || fd.Message() != nil {
 				ft = fs.Type.Elem()
+
+				if ft.Kind() == reflect.Slice {
+					ft = ft.Elem()
+				}
+
 			}
 			isMessage = fd.Message() != nil
 		case fd.Enum() != nil:
@@ -214,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
 			}
 		case fd.Message() != nil:
 			ft = fs.Type
-			if fd.IsWeak() {
-				ft = nil
-			}
 			isMessage = true
 		}
 		if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
@@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
 		}
 		if ft != nil {
 			if mi.fieldTypes == nil {
-				mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{})
+				mi.fieldTypes = make(map[protoreflect.FieldNumber]any)
 			}
 			mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
 		}
@@ -255,6 +255,10 @@ func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) {
 	if !ok {
 		return false
 	}
+	if x.isUnexpandedLazy() {
+		// Avoid calling x.Value(), which triggers a lazy unmarshal.
+		return true
+	}
 	switch {
 	case xd.IsList():
 		return x.Value().List().Len() > 0
@@ -389,7 +393,7 @@ var (
 // MessageOf returns a reflective view over a message. The input must be a
 // pointer to a named Go struct. If the provided type has a ProtoReflect method,
 // it must be implemented by calling this method.
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message {
+func (mi *MessageInfo) MessageOf(m any) protoreflect.Message {
 	if reflect.TypeOf(m) != mi.GoReflectType {
 		panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
 	}
@@ -417,7 +421,7 @@ func (m *messageIfaceWrapper) Reset() {
 func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message {
 	return (*messageReflectWrapper)(m)
 }
-func (m *messageIfaceWrapper) protoUnwrap() interface{} {
+func (m *messageIfaceWrapper) protoUnwrap() any {
 	return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
 }
 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 986322b1..68d4ae32 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -8,11 +8,8 @@ import (
 	"fmt"
 	"math"
 	"reflect"
-	"sync"
 
-	"google.golang.org/protobuf/internal/flags"
 	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
 )
 
 type fieldInfo struct {
@@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	isMessage := fd.Message() != nil
 
 	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
+	fieldOffset := offsetOf(fs)
 	return fieldInfo{
 		// NOTE: The logic below intentionally assumes that oneof fields are
 		// well-formatted. That is, the oneof interface never contains a
@@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
 	conv := NewConverter(ft, fd)
 
 	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
+	fieldOffset := offsetOf(fs)
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
@@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
 	conv := NewConverter(reflect.PtrTo(ft), fd)
 
 	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
+	fieldOffset := offsetOf(fs)
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
@@ -256,6 +253,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	ft := fs.Type
 	nullable := fd.HasPresence()
 	isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+	var getter func(p pointer) protoreflect.Value
 	if nullable {
 		if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
 			// This never occurs for generated message types.
@@ -268,19 +266,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 		}
 	}
 	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs)
+
+	// Generate specialized getter functions to avoid going through reflect.Value
+	if nullable {
+		getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
+	} else {
+		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+	}
 
-	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
 			if p.IsNil() {
 				return false
 			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable {
-				return !rv.IsNil()
+				return !p.Apply(fieldOffset).Elem().IsNil()
 			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			switch rv.Kind() {
 			case reflect.Bool:
 				return rv.Bool()
@@ -300,21 +304,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			rv.Set(reflect.Zero(rv.Type()))
 		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if nullable {
-				if rv.IsNil() {
-					return conv.Zero()
-				}
-				if rv.Kind() == reflect.Ptr {
-					rv = rv.Elem()
-				}
-			}
-			return conv.PBValueOf(rv)
-		},
+		get: getter,
+		// TODO: Implement unsafe fast path for set?
 		set: func(p pointer, v protoreflect.Value) {
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable && rv.Kind() == reflect.Ptr {
@@ -338,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	}
 }
 
-func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
-	if !flags.ProtoLegacy {
-		panic("no support for proto1 weak fields")
-	}
-
-	var once sync.Once
-	var messageType protoreflect.MessageType
-	lazyInit := func() {
-		once.Do(func() {
-			messageName := fd.Message().FullName()
-			messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
-			if messageType == nil {
-				panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
-			}
-		})
-	}
-
-	num := fd.Number()
-	return fieldInfo{
-		fieldDesc: fd,
-		has: func(p pointer) bool {
-			if p.IsNil() {
-				return false
-			}
-			_, ok := p.Apply(weakOffset).WeakFields().get(num)
-			return ok
-		},
-		clear: func(p pointer) {
-			p.Apply(weakOffset).WeakFields().clear(num)
-		},
-		get: func(p pointer) protoreflect.Value {
-			lazyInit()
-			if p.IsNil() {
-				return protoreflect.ValueOfMessage(messageType.Zero())
-			}
-			m, ok := p.Apply(weakOffset).WeakFields().get(num)
-			if !ok {
-				return protoreflect.ValueOfMessage(messageType.Zero())
-			}
-			return protoreflect.ValueOfMessage(m.ProtoReflect())
-		},
-		set: func(p pointer, v protoreflect.Value) {
-			lazyInit()
-			m := v.Message()
-			if m.Descriptor() != messageType.Descriptor() {
-				if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
-					panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
-				}
-				panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
-			}
-			p.Apply(weakOffset).WeakFields().set(num, m.Interface())
-		},
-		mutable: func(p pointer) protoreflect.Value {
-			lazyInit()
-			fs := p.Apply(weakOffset).WeakFields()
-			m, ok := fs.get(num)
-			if !ok {
-				m = messageType.New().Interface()
-				fs.set(num, m)
-			}
-			return protoreflect.ValueOfMessage(m.ProtoReflect())
-		},
-		newMessage: func() protoreflect.Message {
-			lazyInit()
-			return messageType.New()
-		},
-		newField: func() protoreflect.Value {
-			lazyInit()
-			return protoreflect.ValueOfMessage(messageType.New())
-		},
-	}
-}
-
 func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
 	ft := fs.Type
 	conv := NewConverter(ft, fd)
 
 	// TODO: Implement unsafe fast path?
-	fieldOffset := offsetOf(fs, x)
+	fieldOffset := offsetOf(fs)
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
@@ -425,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField
 			}
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if fs.Type.Kind() != reflect.Ptr {
-				return !isZero(rv)
+				return !rv.IsZero()
 			}
 			return !rv.IsNil()
 		},
@@ -472,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
 	oi := &oneofInfo{oneofDesc: od}
 	if od.IsSynthetic() {
 		fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
-		fieldOffset := offsetOf(fs, x)
+		fieldOffset := offsetOf(fs)
 		oi.which = func(p pointer) protoreflect.FieldNumber {
 			if p.IsNil() {
 				return 0
@@ -485,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
 		}
 	} else {
 		fs := si.oneofsByName[od.Name()]
-		fieldOffset := offsetOf(fs, x)
+		fieldOffset := offsetOf(fs)
 		oi.which = func(p pointer) protoreflect.FieldNumber {
 			if p.IsNil() {
 				return 0
@@ -503,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
 	}
 	return oi
 }
-
-// isZero is identical to reflect.Value.IsZero.
-// TODO: Remove this when Go1.13 is the minimally supported Go version.
-func isZero(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return math.Float64bits(v.Float()) == 0
-	case reflect.Complex64, reflect.Complex128:
-		c := v.Complex()
-		return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
-	case reflect.Array:
-		for i := 0; i < v.Len(); i++ {
-			if !isZero(v.Index(i)) {
-				return false
-			}
-		}
-		return true
-	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
-		return v.IsNil()
-	case reflect.String:
-		return v.Len() == 0
-	case reflect.Struct:
-		for i := 0; i < v.NumField(); i++ {
-			if !isZero(v.Field(i)) {
-				return false
-			}
-		}
-		return true
-	default:
-		panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()})
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
new file mode 100644
index 00000000..af5e063a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
@@ -0,0 +1,273 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+	}
+	if fd.Kind() == protoreflect.EnumKind {
+		elemType := fs.Type.Elem()
+		// Enums for nullable types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
+			if rv.IsNil() {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv.Elem())
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).BoolPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfBool(**x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfInt32(**x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfUint32(**x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfInt64(**x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfUint64(**x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfFloat32(**x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfFloat64(**x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).StringPtr()
+				if *x == nil {
+					return conv.Zero()
+				}
+				if len(**x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(**x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).StringPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfString(**x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				if len(*x) == 0 {
+					return conv.Zero()
+				}
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
+
+func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if fd.Kind() == protoreflect.EnumKind {
+		// Enums for non nullable types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return conv.PBValueOf(rv)
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bool()
+			return protoreflect.ValueOfBool(*x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32()
+			return protoreflect.ValueOfInt32(*x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32()
+			return protoreflect.ValueOfUint32(*x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64()
+			return protoreflect.ValueOfInt64(*x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64()
+			return protoreflect.ValueOfUint64(*x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32()
+			return protoreflect.ValueOfFloat32(*x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64()
+			return protoreflect.ValueOfFloat64(*x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).String()
+				if len(*x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).String()
+			return protoreflect.ValueOfString(*x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
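
getterForNullableScalar and getterForDirectScalar bake the field's kind and byte offset into a closure once, so each access skips the generic reflect.Value path. A self-contained sketch of the same technique on a hypothetical struct (the helper below is illustrative and not this package's pointer type):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	count int32
	name  string
}

// getterFor switches on the field's kind once and returns a typed closure that
// reads the field through its byte offset on every subsequent call.
func getterFor(f reflect.StructField) func(p unsafe.Pointer) any {
	off := f.Offset
	switch f.Type.Kind() {
	case reflect.Int32:
		return func(p unsafe.Pointer) any { return *(*int32)(unsafe.Add(p, off)) }
	case reflect.String:
		return func(p unsafe.Pointer) any { return *(*string)(unsafe.Add(p, off)) }
	default:
		panic("unexpected kind: " + f.Type.Kind().String())
	}
}

func main() {
	m := &msg{count: 7, name: "hi"}
	t := reflect.TypeOf(msg{})
	getCount := getterFor(t.Field(0))
	getName := getterFor(t.Field(1))
	fmt.Println(getCount(unsafe.Pointer(m)), getName(unsafe.Pointer(m))) // 7 hi
}
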
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
index 29ba6bd3..99dc23c6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
@@ -23,7 +23,7 @@ func (m *messageState) New() protoreflect.Message {
 func (m *messageState) Interface() protoreflect.ProtoMessage {
 	return m.protoUnwrap().(protoreflect.ProtoMessage)
 }
-func (m *messageState) protoUnwrap() interface{} {
+func (m *messageState) protoUnwrap() any {
 	return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
 }
 func (m *messageState) ProtoMethods() *protoiface.Methods {
@@ -154,7 +154,7 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage {
 	}
 	return (*messageIfaceWrapper)(m)
 }
-func (m *messageReflectWrapper) protoUnwrap() interface{} {
+func (m *messageReflectWrapper) protoUnwrap() any {
 	return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
 }
 func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index 517e9443..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
-	"fmt"
-	"reflect"
-	"sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer interface{}
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
-	index  int
-	export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
-	if len(f.Index) != 1 {
-		panic("embedded structs are not supported")
-	}
-	if f.PkgPath == "" {
-		return offset{index: f.Index[0]} // field is already exported
-	}
-	if x == nil {
-		panic("exporter must be provided for unexported field")
-	}
-	return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
-	return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
-	return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v interface{}) pointer {
-	return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
-	return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
-	if f.export != nil {
-		if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
-			return pointer{v: v}
-		}
-	}
-	return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
-	if got := p.v.Type().Elem(); got != t {
-		panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
-	}
-	return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
-	return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool              { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool          { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool       { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32            { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32        { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32     { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64            { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64        { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64     { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32          { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32      { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32   { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64          { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64      { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64   { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32        { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32    { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64        { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64    { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string          { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string      { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string   { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte           { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte       { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte    { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields  { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
-	return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
-	return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
-	// TODO: reconsider this
-	if p.v.IsNil() {
-		return nil
-	}
-	n := p.v.Elem().Len()
-	s := make([]pointer, n)
-	for i := 0; i < n; i++ {
-		s[i] = pointer{v: p.v.Elem().Index(i)}
-	}
-	return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
-	sp := p.v.Elem()
-	sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
-	p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
-	// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
-	in := p.v.Elem()
-	out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
-	reflect.Copy(out, in)
-	p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState     { panic("not supported") }
-func (ms *messageState) pointer() pointer                 { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo        { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo    { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
-	once sync.Once
-	m    messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
-	m.once.Do(func() {
-		m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
-		m.m.mi = mi
-	})
-	return &m.m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 4b020e31..62f8bf66 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,15 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
 package impl
 
 import (
 	"reflect"
 	"sync/atomic"
 	"unsafe"
+
+	"google.golang.org/protobuf/internal/protolazy"
 )
 
 const UnsafeEnabled = true
@@ -23,7 +22,7 @@ type Pointer unsafe.Pointer
 type offset uintptr
 
 // offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
+func offsetOf(f reflect.StructField) offset {
 	return offset(f.Offset)
 }
 
@@ -50,7 +49,7 @@ func pointerOfValue(v reflect.Value) pointer {
 }
 
 // pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v interface{}) pointer {
+func pointerOfIface(v any) pointer {
 	type ifaceHeader struct {
 		Type unsafe.Pointer
 		Data unsafe.Pointer
@@ -80,7 +79,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
 
 // AsIfaceOf treats p as a pointer to an object of type t and returns the value.
 // It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+func (p pointer) AsIfaceOf(t reflect.Type) any {
 	// TODO: Use tricky unsafe magic to directly create ifaceHeader.
 	return p.AsValueOf(t).Interface()
 }
@@ -112,8 +111,14 @@ func (p pointer) StringSlice() *[]string                { return (*[]string)(p.p
 func (p pointer) Bytes() *[]byte                        { return (*[]byte)(p.p) }
 func (p pointer) BytesPtr() **[]byte                    { return (**[]byte)(p.p) }
 func (p pointer) BytesSlice() *[][]byte                 { return (*[][]byte)(p.p) }
-func (p pointer) WeakFields() *weakFields               { return (*weakFields)(p.p) }
 func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
+	return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
+}
+
+func (p pointer) PresenceInfo() presence {
+	return presence{P: p.p}
+}
 
 func (p pointer) Elem() pointer {
 	return pointer{p: *(*unsafe.Pointer)(p.p)}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
new file mode 100644
index 00000000..38aa7b7d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
@@ -0,0 +1,42 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func (p pointer) AtomicGetPointer() pointer {
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) AtomicSetPointer(v pointer) {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
+}
+
+func (p pointer) AtomicSetNilPointer() {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
+}
+
+func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
+	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
+		return v
+	}
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+type atomicV1MessageInfo struct{ p Pointer }
+
+func (mi *atomicV1MessageInfo) Get() Pointer {
+	return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
+}
+
+func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
+	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
+		return p
+	}
+	return mi.Get()
+}
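
AtomicSetPointerIfNil above is a compare-and-swap: install the candidate only if the slot is still nil, otherwise hand back whatever another goroutine already stored. A minimal, runnable sketch of that pattern with hypothetical types:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type node struct{ v int }

// setIfNil installs candidate only when slot is still nil; if another writer
// won the race, the pointer it stored is returned instead.
func setIfNil(slot *unsafe.Pointer, candidate *node) *node {
	if atomic.CompareAndSwapPointer(slot, nil, unsafe.Pointer(candidate)) {
		return candidate
	}
	return (*node)(atomic.LoadPointer(slot))
}

func main() {
	var slot unsafe.Pointer
	a, b := &node{v: 1}, &node{v: 2}
	fmt.Println(setIfNil(&slot, a).v) // 1: slot was nil, a is installed
	fmt.Println(setIfNil(&slot, b).v) // 1: slot already set, a is returned
}
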
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
new file mode 100644
index 00000000..914cb1de
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -0,0 +1,142 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+// presenceSize represents the size of a presence set, which should be the largest index of the set+1
+type presenceSize uint32
+
+// presence is the internal representation of the bitmap array in a generated protobuf
+type presence struct {
+	// This is a pointer to the beginning of an array of uint32
+	P unsafe.Pointer
+}
+
+func (p presence) toElem(num uint32) (ret *uint32) {
+	const (
+		bitsPerByte = 8
+		siz         = unsafe.Sizeof(*ret)
+	)
+	// p.P points to an array of uint32, num is the bit in this array that the
+	// caller wants to check/manipulate. Calculate the index in the array that
+	// contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
+	offset := uintptr(num) / (siz * bitsPerByte) * siz
+	return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
+}
+
+// Present checks for the presence of a specific field number in a presence set.
+func (p presence) Present(num uint32) bool {
+	if p.P == nil {
+		return false
+	}
+	return Export{}.Present(p.toElem(num), num)
+}
+
+// SetPresent adds presence for a specific field number in a presence set.
+func (p presence) SetPresent(num uint32, size presenceSize) {
+	Export{}.SetPresent(p.toElem(num), num, uint32(size))
+}
+
+// SetPresentUnatomic adds presence for a specific field number in a presence set without using
+// atomic operations. Only to be called during unmarshaling.
+func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
+	Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
+}
+
+// ClearPresent removes presence for a specific field number in a presence set.
+func (p presence) ClearPresent(num uint32) {
+	Export{}.ClearPresent(p.toElem(num), num)
+}
+
+// LoadPresenceCache (together with PresentInCache) allows for a
+// cached version of checking for presence without re-reading the word
+// for every field. It is optimized for efficiency and assumes no
+// simultaneous mutation of the presence set (or at least does not have
+// a problem with simultaneous mutation giving inconsistent results).
+func (p presence) LoadPresenceCache() (current uint32) {
+	if p.P == nil {
+		return 0
+	}
+	return atomic.LoadUint32((*uint32)(p.P))
+}
+
+// PresentInCache reads presence from a cached word in the presence
+// bitmap. It caches up a new word if the bit is outside the
+// word. This is for really fast iteration through bitmaps in cases
+// where we either know that the bitmap will not be altered, or we
+// don't care about inconsistencies caused by simultaneous writes.
+func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
+	if num/32 != *cachedElement {
+		o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
+		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+		*current = atomic.LoadUint32(q)
+		*cachedElement = num / 32
+	}
+	return (*current & (1 << (num % 32))) > 0
+}
+
+// AnyPresent checks if any field is marked as present in the bitmap.
+func (p presence) AnyPresent(size presenceSize) bool {
+	n := uintptr((size + 31) / 32)
+	for j := uintptr(0); j < n; j++ {
+		o := j * unsafe.Sizeof(uint32(0))
+		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+		b := atomic.LoadUint32(q)
+		if b > 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// toRaceDetectData finds the preceding RaceDetectHookData in a
+// message by using pointer arithmetic. As the type of the presence
+// set (bitmap) varies with the number of fields in the protobuf, we
+// cannot have a struct type containing the array and the
+// RaceDetectHookData. Instead, the RaceDetectHookData is placed
+// immediately before the bitmap array, and we find it by walking
+// backwards in the struct.
+//
+// This method is only called from the race-detect version of the code,
+// so RaceDetectHookData is never an empty struct.
+func (p presence) toRaceDetectData() *RaceDetectHookData {
+	var template struct {
+		d RaceDetectHookData
+		a [1]uint32
+	}
+	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
+	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
+}
+
+func atomicLoadShadowPresence(p **[]byte) *[]byte {
+	return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
+	atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
+}
+
+// findPointerToRaceDetectData finds the preceding RaceDetectHookData
+// in a message by using pointer arithmetic. For the methods called
+// directly from generated code, we don't have a pointer to the
+// beginning of the presence set, but a pointer inside the array. As
+// we know the index of the bit we're manipulating (num), we can
+// calculate which element of the array ptr is pointing to. With that
+// information we find the preceding RaceDetectHookData and can
+// manipulate the shadow bitmap.
+//
+// This method is only called from the race-detect version of the
+// code, so RaceDetectHookData is never an empty struct.
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
+	var template struct {
+		d RaceDetectHookData
+		a [1]uint32
+	}
+	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
+	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
+}
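
All of the presence accessors above reduce to word = index/32 and mask = 1<<(index%32), which is why the comment in toElem notes that bit 76 lands in word 2. A small, dependency-free restatement of that arithmetic:

package main

import "fmt"

// wordAndMask mirrors the presence-bitmap addressing: each uint32 word holds
// 32 presence bits.
func wordAndMask(index uint32) (word, mask uint32) {
	return index / 32, 1 << (index % 32)
}

func main() {
	for _, idx := range []uint32{0, 5, 31, 32, 76} {
		w, m := wordAndMask(idx)
		fmt.Printf("presence index %2d -> word %d, mask 0x%08x\n", idx, w, m)
	}
	// Index 76 lands in word 2 (bit 12), matching the 76/32 example in toElem.
}
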
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index a24e6bbd..7b2995dd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -37,6 +37,10 @@ const (
 
 	// ValidationValid indicates that unmarshaling the message will succeed.
 	ValidationValid
+
+	// ValidationWrongWireType indicates that a validated field does not have
+	// the expected wire type.
+	ValidationWrongWireType
 )
 
 func (v ValidationStatus) String() string {
@@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
 		switch fd.Kind() {
 		case protoreflect.MessageKind:
 			vi.typ = validationTypeMessage
+
+			if ft.Kind() == reflect.Ptr {
+				// Repeated opaque message fields are *[]*T.
+				ft = ft.Elem()
+			}
+
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
 		case protoreflect.GroupKind:
 			vi.typ = validationTypeGroup
+
+			if ft.Kind() == reflect.Ptr {
+				// Repeated opaque message fields are *[]*T.
+				ft = ft.Elem()
+			}
+
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
@@ -195,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
 		switch fd.Kind() {
 		case protoreflect.MessageKind:
 			vi.typ = validationTypeMessage
-			if !fd.IsWeak() {
-				vi.mi = getMessageInfo(ft)
-			}
+			vi.mi = getMessageInfo(ft)
 		case protoreflect.GroupKind:
 			vi.typ = validationTypeGroup
 			vi.mi = getMessageInfo(ft)
@@ -304,26 +318,6 @@ State:
 				}
 				if f != nil {
 					vi = f.validation
-					if vi.typ == validationTypeMessage && vi.mi == nil {
-						// Probable weak field.
-						//
-						// TODO: Consider storing the results of this lookup somewhere
-						// rather than recomputing it on every validation.
-						fd := st.mi.Desc.Fields().ByNumber(num)
-						if fd == nil || !fd.IsWeak() {
-							break
-						}
-						messageName := fd.Message().FullName()
-						messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName)
-						switch err {
-						case nil:
-							vi.mi, _ = messageType.(*MessageInfo)
-						case protoregistry.NotFound:
-							vi.typ = validationTypeBytes
-						default:
-							return out, ValidationUnknown
-						}
-					}
 					break
 				}
 				// Possible extension field.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
deleted file mode 100644
index eb79a7ba..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"fmt"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
-)
-
-// weakFields adds methods to the exported WeakFields type for internal use.
-//
-// The exported type is an alias to an unnamed type, so methods can't be
-// defined directly on it.
-type weakFields WeakFields
-
-func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) {
-	m, ok := w[int32(num)]
-	return m, ok
-}
-
-func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) {
-	if *w == nil {
-		*w = make(weakFields)
-	}
-	(*w)[int32(num)] = m
-}
-
-func (w *weakFields) clear(num protoreflect.FieldNumber) {
-	delete(*w, int32(num))
-}
-
-func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool {
-	_, ok := w[int32(num)]
-	return ok
-}
-
-func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) {
-	delete(*w, int32(num))
-}
-
-func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage {
-	if m, ok := w[int32(num)]; ok {
-		return m
-	}
-	mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
-	if mt == nil {
-		panic(fmt.Sprintf("message %v for weak field is not linked in", name))
-	}
-	return mt.Zero().Interface()
-}
-
-func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) {
-	if m != nil {
-		mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
-		if mt == nil {
-			panic(fmt.Sprintf("message %v for weak field is not linked in", name))
-		}
-		if mt != m.ProtoReflect().Type() {
-			panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
-		}
-	}
-	if m == nil || !m.ProtoReflect().IsValid() {
-		delete(*w, int32(num))
-		return
-	}
-	if *w == nil {
-		*w = make(weakFields)
-	}
-	(*w)[int32(num)] = m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
index 1665a68e..a1f09162 100644
--- a/vendor/google.golang.org/protobuf/internal/order/range.go
+++ b/vendor/google.golang.org/protobuf/internal/order/range.go
@@ -18,7 +18,7 @@ type messageField struct {
 }
 
 var messageFieldPool = sync.Pool{
-	New: func() interface{} { return new([]messageField) },
+	New: func() any { return new([]messageField) },
 }
 
 type (
@@ -69,7 +69,7 @@ type mapEntry struct {
 }
 
 var mapEntryPool = sync.Pool{
-	New: func() interface{} { return new([]mapEntry) },
+	New: func() any { return new([]mapEntry) },
 }
 
 type (
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
new file mode 100644
index 00000000..82e5cab4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
@@ -0,0 +1,364 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper code for parsing a protocol buffer
+
+package protolazy
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"google.golang.org/protobuf/encoding/protowire"
+)
+
+// BufferReader is a structure encapsulating a protobuf and a current position
+type BufferReader struct {
+	Buf []byte
+	Pos int
+}
+
+// NewBufferReader creates a new BufferReader from a protobuf
+func NewBufferReader(buf []byte) BufferReader {
+	return BufferReader{Buf: buf, Pos: 0}
+}
+
+var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
+var errOverflow = errors.New("proto: integer overflow")
+
+func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
+	i := b.Pos
+	l := len(b.Buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		v := b.Buf[i]
+		i++
+		x |= (uint64(v) & 0x7F) << shift
+		if v < 0x80 {
+			b.Pos = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint decodes a varint at the current position
+func (b *BufferReader) DecodeVarint() (x uint64, err error) {
+	i := b.Pos
+	buf := b.Buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		b.Pos++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return b.DecodeVarintSlow()
+	}
+
+	var v uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) & 127
+	i++
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 7
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 14
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 21
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 28
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 35
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 42
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 49
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 56
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 63
+	if v < 128 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	b.Pos = i
+	return
+}
+
+// DecodeVarint32 decodes a varint32 at the current position
+func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
+	i := b.Pos
+	buf := b.Buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		b.Pos++
+		return uint32(buf[i]), nil
+	} else if len(buf)-i < 5 {
+		v, err := b.DecodeVarintSlow()
+		return uint32(v), err
+	}
+
+	var v uint32
+	// we already checked the first byte
+	x = uint32(buf[i]) & 127
+	i++
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 7
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 14
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 21
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 28
+	if v < 128 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	b.Pos = i
+	return
+}
+
+// SkipValue skips a value in the protobuf, based on the specified tag
+func (b *BufferReader) SkipValue(tag uint32) (err error) {
+	wireType := tag & 0x7
+	switch protowire.Type(wireType) {
+	case protowire.VarintType:
+		err = b.SkipVarint()
+	case protowire.Fixed64Type:
+		err = b.SkipFixed64()
+	case protowire.BytesType:
+		var n uint32
+		n, err = b.DecodeVarint32()
+		if err == nil {
+			err = b.Skip(int(n))
+		}
+	case protowire.StartGroupType:
+		err = b.SkipGroup(tag)
+	case protowire.Fixed32Type:
+		err = b.SkipFixed32()
+	default:
+		err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+	}
+	return
+}
+
+// SkipGroup skips a group with the specified tag. It executes efficiently using a tag stack
+func (b *BufferReader) SkipGroup(tag uint32) (err error) {
+	tagStack := make([]uint32, 0, 16)
+	tagStack = append(tagStack, tag)
+	var n uint32
+	for len(tagStack) > 0 {
+		tag, err = b.DecodeVarint32()
+		if err != nil {
+			return err
+		}
+		switch protowire.Type(tag & 0x7) {
+		case protowire.VarintType:
+			err = b.SkipVarint()
+		case protowire.Fixed64Type:
+			err = b.Skip(8)
+		case protowire.BytesType:
+			n, err = b.DecodeVarint32()
+			if err == nil {
+				err = b.Skip(int(n))
+			}
+		case protowire.StartGroupType:
+			tagStack = append(tagStack, tag)
+		case protowire.Fixed32Type:
+			err = b.SkipFixed32()
+		case protowire.EndGroupType:
+			if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
+				tagStack = tagStack[:len(tagStack)-1]
+			} else {
+				err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
+					protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// skipVarint efficiently skips a varint
+func (b *BufferReader) SkipVarint() (err error) {
+	i := b.Pos
+
+	if len(b.Buf)-i < 10 {
+		// Use DecodeVarintSlow() to check for buffer overflow, but ignore result
+		if _, err := b.DecodeVarintSlow(); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	return errOverflow
+
+out:
+	b.Pos = i + 1
+	return nil
+}
+
+// skip skips the specified number of bytes
+func (b *BufferReader) Skip(n int) (err error) {
+	if len(b.Buf) < b.Pos+n {
+		return io.ErrUnexpectedEOF
+	}
+	b.Pos += n
+	return
+}
+
+// skipFixed64 skips a fixed64
+func (b *BufferReader) SkipFixed64() (err error) {
+	return b.Skip(8)
+}
+
+// skipFixed32 skips a fixed32
+func (b *BufferReader) SkipFixed32() (err error) {
+	return b.Skip(4)
+}
+
+// skipBytes skips a set of bytes
+func (b *BufferReader) SkipBytes() (err error) {
+	n, err := b.DecodeVarint32()
+	if err != nil {
+		return err
+	}
+	return b.Skip(int(n))
+}
+
+// Done returns whether we are at the end of the protobuf
+func (b *BufferReader) Done() bool {
+	return b.Pos == len(b.Buf)
+}
+
+// Remaining returns how many bytes remain
+func (b *BufferReader) Remaining() int {
+	return len(b.Buf) - b.Pos
+}
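
The BufferReader above hand-unrolls base-128 varint decoding for speed. For readers following the change, the same wire-format primitive is exposed by the public protowire helpers; the sketch below is illustrative only and is not part of this diff, assuming nothing beyond the already-vendored google.golang.org/protobuf/encoding/protowire package.

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/encoding/protowire"
	)

	func main() {
		// 300 encodes as 0xAC 0x02: low 7 bits first, and the high bit of
		// each byte marks "more bytes follow".
		b := protowire.AppendVarint(nil, 300)
		fmt.Printf("% x\n", b) // ac 02

		// n is the number of bytes consumed; a negative n signals malformed input.
		v, n := protowire.ConsumeVarint(b)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		fmt.Println(v) // 300
	}
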
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
new file mode 100644
index 00000000..ff4d4834
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
@@ -0,0 +1,359 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protolazy contains internal data structures for lazy message decoding.
+package protolazy
+
+import (
+	"fmt"
+	"sort"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// IndexEntry is the structure for an index of the fields in a message of a
+// proto (not descending to sub-messages)
+type IndexEntry struct {
+	FieldNum uint32
+	// first byte of this tag/field
+	Start uint32
+	// first byte after a contiguous sequence of bytes for this tag/field, which could
+	// include a single encoding of the field, or multiple encodings for the field
+	End uint32
+	// True if this protobuf segment includes multiple encodings of the field
+	MultipleContiguous bool
+}
+
+// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+type XXX_lazyUnmarshalInfo struct {
+	// Index of fields and their positions in the protobuf for this
+	// message.  Make index be a pointer to a slice so it can be updated
+	// atomically.  The index pointer is only set once (lazily when/if
+	// the index is first needed), and must always be SET and LOADED
+	// ATOMICALLY.
+	index *[]IndexEntry
+	// The protobuf associated with this lazily decoded message.  It is
+	// only set during proto.Unmarshal().  It doesn't need to be set and
+	// loaded atomically, since any simultaneous set (Unmarshal) and read
+	// (during a get) would already be a race in the app code.
+	Protobuf []byte
+	// The flags present when Unmarshal was originally called for this particular message
+	unmarshalFlags piface.UnmarshalInputFlags
+}
+
+// The Buffer and SetBuffer methods let v2/internal/impl interact with
+// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.
+
+// Buffer returns the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
+	return lazy.Protobuf
+}
+
+// SetBuffer sets the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
+	lazy.Protobuf = b
+}
+
+// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
+// The flags should reflect how Unmarshal was called.
+func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
+	lazy.unmarshalFlags = f
+}
+
+// UnmarshalFlags returns the original unmarshalInputFlags.
+func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
+	return lazy.unmarshalFlags
+}
+
+// AllowedPartial returns true if the user originally unmarshalled this message with
+// AllowPartial set to true
+func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
+	return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
+}
+
+func protoFieldNumber(tag uint32) uint32 {
+	return tag >> 3
+}
+
+// buildIndex builds an index of the specified protobuf, returning the index
+// array and an error.
+func buildIndex(buf []byte) ([]IndexEntry, error) {
+	index := make([]IndexEntry, 0, 16)
+	var lastProtoFieldNum uint32
+	var outOfOrder bool
+
+	var r BufferReader = NewBufferReader(buf)
+
+	for !r.Done() {
+		var tag uint32
+		var err error
+		var curPos = r.Pos
+		// INLINED: tag, err = r.DecodeVarint32()
+		{
+			i := r.Pos
+			buf := r.Buf
+
+			if i >= len(buf) {
+				return nil, errOutOfBounds
+			} else if buf[i] < 0x80 {
+				r.Pos++
+				tag = uint32(buf[i])
+			} else if r.Remaining() < 5 {
+				var v uint64
+				v, err = r.DecodeVarintSlow()
+				tag = uint32(v)
+			} else {
+				var v uint32
+				// we already checked the first byte
+				tag = uint32(buf[i]) & 127
+				i++
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 7
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 14
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 21
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 28
+				if v < 128 {
+					goto done
+				}
+
+				return nil, errOutOfBounds
+
+			done:
+				r.Pos = i
+			}
+		}
+		// DONE: tag, err = r.DecodeVarint32()
+
+		fieldNum := protoFieldNumber(tag)
+		if fieldNum < lastProtoFieldNum {
+			outOfOrder = true
+		}
+
+		// Skip the current value -- will skip over an entire group as well.
+		// INLINED: err = r.SkipValue(tag)
+		wireType := tag & 0x7
+		switch protowire.Type(wireType) {
+		case protowire.VarintType:
+			// INLINED: err = r.SkipVarint()
+			i := r.Pos
+
+			if len(r.Buf)-i < 10 {
+				// Use DecodeVarintSlow() to skip while
+				// checking for buffer overflow, but ignore result
+				_, err = r.DecodeVarintSlow()
+				goto out2
+			}
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			return nil, errOverflow
+		out:
+			r.Pos = i + 1
+			// DONE: err = r.SkipVarint()
+		case protowire.Fixed64Type:
+			err = r.SkipFixed64()
+		case protowire.BytesType:
+			var n uint32
+			n, err = r.DecodeVarint32()
+			if err == nil {
+				err = r.Skip(int(n))
+			}
+		case protowire.StartGroupType:
+			err = r.SkipGroup(tag)
+		case protowire.Fixed32Type:
+			err = r.SkipFixed32()
+		default:
+			err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+		}
+		// DONE: err = r.SkipValue(tag)
+
+	out2:
+		if err != nil {
+			return nil, err
+		}
+		if fieldNum != lastProtoFieldNum {
+			index = append(index, IndexEntry{FieldNum: fieldNum,
+				Start: uint32(curPos),
+				End:   uint32(r.Pos)},
+			)
+		} else {
+			index[len(index)-1].End = uint32(r.Pos)
+			index[len(index)-1].MultipleContiguous = true
+		}
+		lastProtoFieldNum = fieldNum
+	}
+	if outOfOrder {
+		sort.Slice(index, func(i, j int) bool {
+			return index[i].FieldNum < index[j].FieldNum ||
+				(index[i].FieldNum == index[j].FieldNum &&
+					index[i].Start < index[j].Start)
+		})
+	}
+	return index, nil
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) {
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			size += int(entry.End - entry.Start)
+		}
+		return size
+	}
+	if !found {
+		return 0
+	}
+	return int(end - start)
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) {
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
+		}
+		return b, true
+	}
+	if !found {
+		return nil, false
+	}
+	b = append(b, lazy.Protobuf[start:end]...)
+	return b, true
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
+	atomicStoreIndex(&lazy.index, &index)
+}
+
+// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information
+// (including the protobuf) and returns startOffset/endOffset/found.
+func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
+	if lazy.Protobuf == nil {
+		// There is no backing protobuf for this message -- it was made from a builder
+		return 0, 0, false, false, nil
+	}
+	index := atomicLoadIndex(&lazy.index)
+	if index == nil {
+		r, err := buildIndex(lazy.Protobuf)
+		if err != nil {
+			panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
+		}
+		// lazy.index is a pointer to the slice returned by buildIndex
+		index = &r
+		atomicStoreIndex(&lazy.index, index)
+	}
+	return lookupField(index, fieldNum)
+}
+
+// lookupField returns the offset at which the indicated field starts using
+// the index, offset immediately after field ends (including all instances of
+// a repeated field), and bools indicating if field was found and if there
+// are multiple encodings of the field in the byte range.
+//
+// To handle the uncommon case where there are repeated encodings for the same
+// field which are not consecutive in the protobuf (so we need to return
+// multiple start/end offsets), we also return a slice multipleEntries.  If
+// multipleEntries is non-nil, then multiple entries were found, and the
+// values in the slice should be used, rather than start/end/found.
+func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
+	// The pointer indexp to the index was already loaded atomically.
+	// The slice is uniquely associated with the pointer, so it doesn't
+	// need to be loaded atomically.
+	index := *indexp
+	for i, entry := range index {
+		if fieldNum == entry.FieldNum {
+			if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
+				// Handle the uncommon case where there are
+				// repeated entries for the same field which
+				// are not contiguous in the protobuf.
+				multiple := make([]IndexEntry, 1, 2)
+				multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
+				i++
+				for i < len(index) && index[i].FieldNum == fieldNum {
+					multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
+					i++
+				}
+				return 0, 0, false, false, multiple
+
+			}
+			return entry.Start, entry.End, true, entry.MultipleContiguous, nil
+		}
+		if fieldNum < entry.FieldNum {
+			return 0, 0, false, false, nil
+		}
+	}
+	return 0, 0, false, false, nil
+}
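
buildIndex above walks the top-level fields of the wire format once, recording a start/end byte range per field number so a lazy submessage can later be located without a full decode. The sketch below expresses a rough equivalent of that walk with the public protowire API; it is illustrative only and not part of this change.

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/encoding/protowire"
	)

	// indexFields records the byte range covered by each top-level field number.
	func indexFields(b []byte) (map[protowire.Number][2]int, error) {
		ranges := map[protowire.Number][2]int{}
		for pos := 0; pos < len(b); {
			num, typ, n := protowire.ConsumeTag(b[pos:])
			if n < 0 {
				return nil, protowire.ParseError(n)
			}
			m := protowire.ConsumeFieldValue(num, typ, b[pos+n:])
			if m < 0 {
				return nil, protowire.ParseError(m)
			}
			end := pos + n + m
			if r, ok := ranges[num]; ok {
				r[1] = end // merge repeated encodings of the same field
				ranges[num] = r
			} else {
				ranges[num] = [2]int{pos, end}
			}
			pos = end
		}
		return ranges, nil
	}

	func main() {
		// field 1, varint 150 encodes as 08 96 01
		b := protowire.AppendTag(nil, 1, protowire.VarintType)
		b = protowire.AppendVarint(b, 150)
		fmt.Println(indexFields(b))
	}
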
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
new file mode 100644
index 00000000..dc2a64ca
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protolazy
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
+	return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
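
The two unsafe.Pointer casts above give the index field its set-once, load-atomically behaviour. As an aside, on Go 1.19+ the same idea can be written without unsafe via the generic sync/atomic.Pointer type; the sketch below is a hypothetical alternative, not how the vendored package is structured.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// entry stands in for protolazy.IndexEntry in this sketch.
	type entry struct {
		FieldNum   uint32
		Start, End uint32
	}

	// lazyIndex stores a pointer to an immutable slice; the pointer is set once
	// and only read afterwards, so atomic Load/Store is all the synchronisation needed.
	type lazyIndex struct {
		index atomic.Pointer[[]entry]
	}

	func (l *lazyIndex) get() *[]entry  { return l.index.Load() }
	func (l *lazyIndex) set(v *[]entry) { l.index.Store(v) }

	func main() {
		var l lazyIndex
		idx := []entry{{FieldNum: 1, Start: 0, End: 3}}
		l.set(&idx)
		fmt.Println((*l.get())[0].FieldNum) // 1
	}
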
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
deleted file mode 100644
index a1f6f333..00000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package strs
-
-import pref "google.golang.org/protobuf/reflect/protoreflect"
-
-func UnsafeString(b []byte) string {
-	return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
-	return []byte(s)
-}
-
-type Builder struct{}
-
-func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
-	return prefix.Append(name)
-}
-
-func (*Builder) MakeString(b []byte) string {
-	return string(b)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index a008acd0..832a7988 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
 
 package strs
 
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
index 60166f2b..1ffddf68 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
 
 package strs
 
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index a3cba508..01efc330 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
 //  10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 34
-	Patch      = 1
+	Minor      = 36
+	Patch      = 5
 	PreRelease = ""
 )
 
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index d75a6534..4cbf1aea 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -8,7 +8,6 @@ import (
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
 	"google.golang.org/protobuf/internal/errors"
-	"google.golang.org/protobuf/internal/flags"
 	"google.golang.org/protobuf/internal/genid"
 	"google.golang.org/protobuf/internal/pragma"
 	"google.golang.org/protobuf/reflect/protoreflect"
@@ -47,6 +46,12 @@ type UnmarshalOptions struct {
 	// RecursionLimit limits how deeply messages may be nested.
 	// If zero, a default limit is applied.
 	RecursionLimit int
+
+	//
+	// NoLazyDecoding turns off lazy decoding, which otherwise is enabled by
+	// default. Lazy decoding only affects submessages (annotated with [lazy =
+	// true] in the .proto file) within messages that use the Opaque API.
+	NoLazyDecoding bool
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
@@ -104,6 +109,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
 		if o.DiscardUnknown {
 			in.Flags |= protoiface.UnmarshalDiscardUnknown
 		}
+
+		if !allowPartial {
+			// This does not affect how current unmarshal functions work; it just allows them
+			// to record this for the lazy decoding case.
+			in.Flags |= protoiface.UnmarshalCheckRequired
+		}
+		if o.NoLazyDecoding {
+			in.Flags |= protoiface.UnmarshalNoLazyDecoding
+		}
+
 		out, err = methods.Unmarshal(in)
 	} else {
 		o.RecursionLimit--
@@ -156,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message)
 		var err error
 		if fd == nil {
 			err = errUnknown
-		} else if flags.ProtoLegacy {
-			if fd.IsWeak() && fd.Message().IsPlaceholder() {
-				err = errUnknown // weak referent is not linked in
-			}
 		}
 
 		// Parse the field value.
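
The decode.go changes above add the NoLazyDecoding option and map it onto protoiface.UnmarshalNoLazyDecoding for the fast path. A minimal usage sketch follows; it is illustrative only, borrows the well-known structpb types purely for a concrete message, and for messages without [lazy = true] submessages the option is a no-op.

	package main

	import (
		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/structpb"
	)

	func main() {
		b, err := proto.Marshal(structpb.NewStringValue("hello"))
		if err != nil {
			panic(err)
		}

		// Force eager decoding of any lazily-decodable submessages for this call.
		var out structpb.Value
		if err := (proto.UnmarshalOptions{NoLazyDecoding: true}).Unmarshal(b, &out); err != nil {
			panic(err)
		}
	}
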
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 1f847bcc..f0473c58 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -63,7 +63,8 @@ type MarshalOptions struct {
 	// options (except for UseCachedSize itself).
 	//
 	// 2. The message and all its submessages have not changed in any
-	// way since the Size call.
+	// way since the Size call. For lazily decoded messages, accessing
+	// a message results in decoding the message, which is a change.
 	//
 	// If either of these invariants is violated,
 	// the results are undefined and may include panics or corrupted output.
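
The amended UseCachedSize comment pins down when the cached-size fast path is safe: nothing in the message may change between the Size call and Marshal, and for lazily decoded messages even reading a submessage decodes (and therefore changes) it. The usual size-then-marshal pattern looks roughly like the sketch below; it is illustrative only and not part of the diff.

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		m := wrapperspb.String("cached-size example")

		opts := proto.MarshalOptions{UseCachedSize: true}
		n := opts.Size(m) // caches per-message sizes as a side effect

		// The message must not be touched between Size and MarshalAppend.
		buf, err := opts.MarshalAppend(make([]byte, 0, n), m)
		if err != nil {
			panic(err)
		}
		fmt.Println(n == len(buf)) // true
	}
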
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 1a0be1b0..c36d4a9c 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,6 +8,7 @@ import (
 	"reflect"
 
 	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
 )
 
 // Equal reports whether two messages are equal,
@@ -51,6 +52,14 @@ func Equal(x, y Message) bool {
 	if mx.IsValid() != my.IsValid() {
 		return false
 	}
+
+	// Only one of the messages needs to implement the fast-path for it to work.
+	pmx := protoMethods(mx)
+	pmy := protoMethods(my)
+	if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
+		return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
+	}
+
 	vx := protoreflect.ValueOfMessage(mx)
 	vy := protoreflect.ValueOfMessage(my)
 	return vx.Equal(vy)
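
With the block added above, proto.Equal can hand the comparison to a generated Equal fast path when both messages provide one; callers see no behavioural difference. A trivial usage sketch, illustrative only:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		a := wrapperspb.String("x")
		b := wrapperspb.String("x")

		// Semantically equal messages compare equal whether the reflective
		// path or the fast path is taken.
		fmt.Println(proto.Equal(a, b)) // true
		fmt.Println(a == b)            // false: distinct pointers
	}
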
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index c9c8721a..78445d11 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -39,7 +39,49 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
 // If the field is unpopulated, it returns the default value for
 // scalars and an immutable, empty value for lists or messages.
 // It panics if xt does not extend m.
-func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+//	╔═══════════════════╤═════════════════════════╗
+//	║ Go type           │ Protobuf kind           ║
+//	╠═══════════════════╪═════════════════════════╣
+//	║ bool              │ bool                    ║
+//	║ int32             │ int32, sint32, sfixed32 ║
+//	║ int64             │ int64, sint64, sfixed64 ║
+//	║ uint32            │ uint32, fixed32         ║
+//	║ uint64            │ uint64, fixed64         ║
+//	║ float32           │ float                   ║
+//	║ float64           │ double                  ║
+//	║ string            │ string                  ║
+//	║ []byte            │ bytes                   ║
+//	║ protoreflect.Enum │ enum                    ║
+//	║ proto.Message     │ message, group          ║
+//	╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// GetExtension, then the call should be followed immediately by a
+// type assertion to the expected output value. For example:
+//
+//	mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
+//
+// This pattern enables static analysis tools to verify that the asserted type
+// matches the Go type associated with the extension field and
+// also enables a possible future migration to a type-safe extension API.
+//
+// Since singular messages are the most common extension type, the pattern of
+// calling HasExtension followed by GetExtension may be simplified to:
+//
+//	if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
+//	    ... // make use of mm
+//	}
+//
+// The mm variable is non-nil if and only if HasExtension reports true.
+func GetExtension(m Message, xt protoreflect.ExtensionType) any {
 	// Treat nil message interface as an empty message; return the default.
 	if m == nil {
 		return xt.InterfaceOf(xt.Zero())
@@ -51,7 +93,36 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
 // SetExtension stores the value of an extension field.
 // It panics if m is invalid, xt does not extend m, or if type of v
 // is invalid for the specified extension field.
-func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+//	╔═══════════════════╤═════════════════════════╗
+//	║ Go type           │ Protobuf kind           ║
+//	╠═══════════════════╪═════════════════════════╣
+//	║ bool              │ bool                    ║
+//	║ int32             │ int32, sint32, sfixed32 ║
+//	║ int64             │ int64, sint64, sfixed64 ║
+//	║ uint32            │ uint32, fixed32         ║
+//	║ uint64            │ uint64, fixed64         ║
+//	║ float32           │ float                   ║
+//	║ float64           │ double                  ║
+//	║ string            │ string                  ║
+//	║ []byte            │ bytes                   ║
+//	║ protoreflect.Enum │ enum                    ║
+//	║ proto.Message     │ message, group          ║
+//	╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
+// concrete type that matches the expected Go type for the extension descriptor
+// so that static analysis tools can verify type correctness.
+// This also enables a possible future migration to a type-safe extension API.
+func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
 	xd := xt.TypeDescriptor()
 	pv := xt.ValueOf(v)
 
@@ -78,7 +149,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
 // It returns immediately if f returns false.
 // While iterating, mutating operations may only be performed
 // on the current extension field.
-func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
+func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) {
 	// Treat nil message interface as an empty message; nothing to range over.
 	if m == nil {
 		return
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index 052fb5ae..c8675806 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -12,11 +12,19 @@ import (
 )
 
 // Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
 func Size(m Message) int {
 	return MarshalOptions{}.Size(m)
 }
 
 // Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
 func (o MarshalOptions) Size(m Message) int {
 	// Treat a nil message interface as an empty message; nothing to output.
 	if m == nil {
diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
new file mode 100644
index 00000000..267fd0f1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
@@ -0,0 +1,80 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// ValueOrNil returns nil if has is false, or a pointer to a new variable
+// containing the value returned by the specified getter.
+//
+// This function is similar to the wrappers (proto.Int32(), proto.String(),
+// etc.), but is generic (works for any field type) and works with the hasser
+// and getter of a field, as opposed to a value.
+//
+// This is convenient when populating builder fields.
+//
+// Example:
+//
+//	hop := attr.GetDirectHop()
+//	injectedRoute := ripb.InjectedRoute_builder{
+//	  Prefixes: route.GetPrefixes(),
+//	  NextHop:  proto.ValueOrNil(hop.HasAddress(), hop.GetAddress),
+//	}
+func ValueOrNil[T any](has bool, getter func() T) *T {
+	if !has {
+		return nil
+	}
+	v := getter()
+	return &v
+}
+
+// ValueOrDefault returns the protobuf message val if val is not nil, otherwise
+// it returns a pointer to an empty val message.
+//
+// This function allows for translating code from the old Open Struct API to the
+// new Opaque API.
+//
+// The old Open Struct API represented oneof fields with a wrapper struct:
+//
+//	var signedImg *accountpb.SignedImage
+//	profile := &accountpb.Profile{
+//		// The Avatar oneof will be set, with an empty SignedImage.
+//		Avatar: &accountpb.Profile_SignedImage{signedImg},
+//	}
+//
+// The new Opaque API treats oneof fields like regular fields, there are no more
+// wrapper structs:
+//
+//	var signedImg *accountpb.SignedImage
+//	profile := &accountpb.Profile{}
+//	profile.SetSignedImage(signedImg)
+//
+// For convenience, the Opaque API also offers Builders, which allow for a
+// direct translation of struct initialization. However, because Builders use
+// nilness to represent field presence (but there is no non-nil wrapper struct
+// anymore), Builders cannot distinguish between an unset oneof and a set oneof
+// with nil message. The above code would need to be translated with help of the
+// ValueOrDefault function to retain the same behavior:
+//
+//	var signedImg *accountpb.SignedImage
+//	return &accountpb.Profile_builder{
+//		SignedImage: proto.ValueOrDefault(signedImg),
+//	}.Build()
+func ValueOrDefault[T interface {
+	*P
+	Message
+}, P any](val T) T {
+	if val == nil {
+		return T(new(P))
+	}
+	return val
+}
+
+// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of
+// type []byte.
+func ValueOrDefaultBytes(val []byte) []byte {
+	if val == nil {
+		return []byte{}
+	}
+	return val
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 8fbecb4f..823dbf3b 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -13,6 +13,8 @@
 package protodesc
 
 import (
+	"strings"
+
 	"google.golang.org/protobuf/internal/editionssupport"
 	"google.golang.org/protobuf/internal/errors"
 	"google.golang.org/protobuf/internal/filedesc"
@@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
 	default:
 		return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
 	}
-	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
-		return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
-	}
 	f.L1.Path = fd.GetName()
 	if f.L1.Path == "" {
 		return nil, errors.New("file path must be populated")
 	}
+	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
+		// Allow cmd/protoc-gen-go/testdata to use any edition for easier
+		// testing of upcoming edition features.
+		if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
+			return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
+		}
+	}
 	f.L1.Package = protoreflect.FullName(fd.GetPackage())
 	if !f.L1.Package.IsValid() && f.L1.Package != "" {
 		return nil, errors.New("invalid package: %q", f.L1.Package)
@@ -126,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
 		}
 		f.L2.Imports[i].IsPublic = true
 	}
-	for _, i := range fd.GetWeakDependency() {
-		if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak {
-			return nil, errors.New("invalid or duplicate weak import index: %d", i)
-		}
-		f.L2.Imports[i].IsWeak = true
-	}
 	imps := importSet{f.Path(): true}
 	for i, path := range fd.GetDependency() {
 		imp := &f.L2.Imports[i]
 		f, err := r.FindFileByPath(path)
-		if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) {
+		if err == protoregistry.NotFound && o.AllowUnresolvable {
 			f = filedesc.PlaceholderFile(path)
 		} else if err != nil {
 			return nil, errors.New("could not resolve import %q: %v", path, err)
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 85617554..9da34998 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -149,7 +149,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
 		if opts := fd.GetOptions(); opts != nil {
 			opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
 			f.L1.Options = func() protoreflect.ProtoMessage { return opts }
-			f.L1.IsWeak = opts.GetWeak()
+			f.L1.IsLazy = opts.GetLazy()
 			if opts.Packed != nil {
 				f.L1.EditionFeatures.IsPacked = opts.GetPacked()
 			}
@@ -214,6 +214,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
 		if xd.JsonName != nil {
 			x.L2.StringName.InitJSON(xd.GetJsonName())
 		}
+		if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
+			x.L1.Kind = protoreflect.GroupKind
+		}
 	}
 	return xs, nil
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
index 254ca585..ff692436 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
@@ -43,9 +43,14 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc
 				o.L1.Fields.List = append(o.L1.Fields.List, f)
 			}
 
-			if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil {
+			if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil {
 				return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
 			}
+			if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) {
+				// A map field might inherit delimited encoding from a file-wide default feature.
+				// But maps never actually use delimited encoding. (At least for now...)
+				f.L1.Kind = protoreflect.MessageKind
+			}
 			if fd.DefaultValue != nil {
 				v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable)
 				if err != nil {
@@ -68,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc
 func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) {
 	for i, xd := range xds {
 		x := &xs[i]
-		if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil {
+		if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil {
 			return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err)
 		}
-		if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil {
+		if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil {
 			return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err)
 		}
 		if xd.DefaultValue != nil {
@@ -90,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc
 		s := &ss[i]
 		for j, md := range sd.GetMethod() {
 			m := &s.L2.Methods.List[j]
-			m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false)
+			m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()))
 			if err != nil {
 				return errors.New("service method %q cannot resolve input: %v", m.FullName(), err)
 			}
-			m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false)
+			m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()))
 			if err != nil {
 				return errors.New("service method %q cannot resolve output: %v", m.FullName(), err)
 			}
@@ -106,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc
 // findTarget finds an enum or message descriptor if k is an enum, message,
 // group, or unknown. If unknown, and the name could be resolved, the kind
 // returned kind is set based on the type of the resolved descriptor.
-func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
 	switch k {
 	case protoreflect.EnumKind:
-		ed, err := r.findEnumDescriptor(scope, ref, isWeak)
+		ed, err := r.findEnumDescriptor(scope, ref)
 		if err != nil {
 			return 0, nil, nil, err
 		}
 		return k, ed, nil, nil
 	case protoreflect.MessageKind, protoreflect.GroupKind:
-		md, err := r.findMessageDescriptor(scope, ref, isWeak)
+		md, err := r.findMessageDescriptor(scope, ref)
 		if err != nil {
 			return 0, nil, nil, err
 		}
@@ -124,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName,
 		// Handle unspecified kinds (possible with parsers that operate
 		// on a per-file basis without knowledge of dependencies).
 		d, err := r.findDescriptor(scope, ref)
-		if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+		if err == protoregistry.NotFound && r.allowUnresolvable {
 			return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil
 		} else if err == protoregistry.NotFound {
 			return 0, nil, nil, errors.New("%q not found", ref.FullName())
@@ -201,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName)
 	}
 }
 
-func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) {
+func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) {
 	d, err := r.findDescriptor(scope, ref)
-	if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+	if err == protoregistry.NotFound && r.allowUnresolvable {
 		return filedesc.PlaceholderEnum(ref.FullName()), nil
 	} else if err == protoregistry.NotFound {
 		return nil, errors.New("%q not found", ref.FullName())
@@ -217,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa
 	return ed, nil
 }
 
-func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) {
+func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) {
 	d, err := r.findDescriptor(scope, ref)
-	if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+	if err == protoregistry.NotFound && r.allowUnresolvable {
 		return filedesc.PlaceholderMessage(ref.FullName()), nil
 	} else if err == protoregistry.NotFound {
 		return nil, errors.New("%q not found", ref.FullName())
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
index c6293086..c343d922 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
@@ -116,18 +116,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds
 			if m.ExtensionRanges().Len() > 0 {
 				return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName())
 			}
-			// Verify that field names in proto3 do not conflict if lowercased
-			// with all underscores removed.
-			// See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847
-			names := map[string]protoreflect.FieldDescriptor{}
-			for i := 0; i < m.Fields().Len(); i++ {
-				f1 := m.Fields().Get(i)
-				s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1)
-				if f2, ok := names[s]; ok {
-					return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name())
-				}
-				names[s] = f1
-			}
 		}
 
 		for j, fd := range md.GetField() {
@@ -161,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds
 					return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName())
 				}
 			}
-			if f.IsWeak() && !flags.ProtoLegacy {
-				return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName())
-			}
-			if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
-				return errors.New("message field %q may only be weak for an optional message", f.FullName())
-			}
 			if f.IsPacked() && !isPackable(f) {
 				return errors.New("message field %q is not packable", f.FullName())
 			}
@@ -211,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds
 				if f.Cardinality() != protoreflect.Optional {
 					return errors.New("message field %q belongs in a oneof and must be optional", f.FullName())
 				}
-				if f.IsWeak() {
-					return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName())
-				}
 			}
 		}
 
@@ -266,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd
 				return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
 			}
 		}
-		if xd.GetOptions().GetWeak() {
-			return errors.New("extension field %q cannot be a weak reference", x.FullName())
-		}
 		if x.IsPacked() && !isPackable(x) {
 			return errors.New("extension field %q is not packable", x.FullName())
 		}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 804830ed..697a61b2 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -11,10 +11,11 @@ import (
 
 	"google.golang.org/protobuf/internal/editiondefaults"
 	"google.golang.org/protobuf/internal/filedesc"
+	"google.golang.org/protobuf/internal/genid"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/types/descriptorpb"
-	gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
+	"google.golang.org/protobuf/types/gofeaturespb"
 )
 
 var defaults = &descriptorpb.FeatureSetDefaults{}
@@ -43,6 +44,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
 		return descriptorpb.Edition_EDITION_PROTO3
 	case filedesc.Edition2023:
 		return descriptorpb.Edition_EDITION_2023
+	case filedesc.Edition2024:
+		return descriptorpb.Edition_EDITION_2024
 	default:
 		panic(fmt.Sprintf("unknown value for edition: %v", ed))
 	}
@@ -123,10 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp
 		parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW
 	}
 
-	if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil {
-		if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil {
-			parentFS.GenerateLegacyUnmarshalJSON = *luje
-		}
+	// We must not use proto.GetExtension(child, gofeaturespb.E_Go)
+	// because that only works for messages we generated, but not for
+	// dynamicpb messages. See golang/protobuf#1669.
+	//
+	// Further, we harden this code against adversarial inputs: a
+	// service which accepts descriptors from a possibly malicious
+	// source shouldn't crash.
+	goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
+	if !goFeatures.IsValid() {
+		return parentFS
+	}
+	gf, ok := goFeatures.Interface().(protoreflect.Message)
+	if !ok {
+		return parentFS
+	}
+	// gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures.
+	fields := gf.Descriptor().Fields()
+
+	if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil &&
+		!fd.IsList() &&
+		fd.Kind() == protoreflect.BoolKind &&
+		gf.Has(fd) {
+		parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool()
+	}
+
+	if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil &&
+		!fd.IsList() &&
+		fd.Kind() == protoreflect.EnumKind &&
+		gf.Has(fd) {
+		parentFS.StripEnumPrefix = int(gf.Get(fd).Enum())
+	}
+
+	if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil &&
+		!fd.IsList() &&
+		fd.Kind() == protoreflect.EnumKind &&
+		gf.Has(fd) {
+		parentFS.APILevel = int(gf.Get(fd).Enum())
 	}
 
 	return parentFS
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index a5de8d40..9b880aa8 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
 		if imp.IsPublic {
 			p.PublicDependency = append(p.PublicDependency, int32(i))
 		}
-		if imp.IsWeak {
-			p.WeakDependency = append(p.WeakDependency, int32(i))
-		}
 	}
 	for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ {
 		loc := locs.Get(i)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index d5d5af6e..742cb518 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,6 +23,7 @@ type (
 		Unmarshal        func(unmarshalInput) (unmarshalOutput, error)
 		Merge            func(mergeInput) mergeOutput
 		CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+		Equal            func(equalInput) equalOutput
 	}
 	supportFlags = uint64
 	sizeInput    = struct {
@@ -75,4 +76,13 @@ type (
 	checkInitializedOutput = struct {
 		pragma.NoUnkeyedLiterals
 	}
+	equalInput = struct {
+		pragma.NoUnkeyedLiterals
+		MessageA Message
+		MessageB Message
+	}
+	equalOutput = struct {
+		pragma.NoUnkeyedLiterals
+		Equal bool
+	}
 )
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index 00102d31..ea154eec 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -485,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte {
 		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 3:
 		b = p.appendSingularField(b, "debug_redact", nil)
+	case 4:
+		b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport)
 	case 999:
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 5b80afe5..cd7fbc87 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -68,7 +68,7 @@ type Descriptor interface {
 	// dependency is not resolved, in which case only name information is known.
 	//
 	// Placeholder types may only be returned by the following accessors
-	// as a result of unresolved dependencies or weak imports:
+	// as a result of unresolved dependencies:
 	//
 	//	╔═══════════════════════════════════╤═════════════════════╗
 	//	║ Accessor                          │ Descriptor          ║
@@ -168,11 +168,7 @@ type FileImport struct {
 	// The current file and the imported file must be within proto package.
 	IsPublic bool
 
-	// IsWeak reports whether this is a weak import, which does not impose
-	// a direct dependency on the target file.
-	//
-	// Weak imports are a legacy proto1 feature. Equivalent behavior is
-	// achieved using proto2 extension fields or proto3 Any messages.
+	// Deprecated: support for weak fields has been removed.
 	IsWeak bool
 }
 
@@ -325,9 +321,7 @@ type FieldDescriptor interface {
 	// specified in the source .proto file.
 	HasOptionalKeyword() bool
 
-	// IsWeak reports whether this is a weak field, which does not impose a
-	// direct dependency on the target type.
-	// If true, then Message returns a placeholder type.
+	// Deprecated: support for weak fields has been removed.
 	IsWeak() bool
 
 	// IsPacked reports whether repeated primitive numeric kinds should be
@@ -510,7 +504,7 @@ type ExtensionType interface {
 	//
 	// ValueOf is more extensive than protoreflect.ValueOf for a given field's
 	// value as it has more type information available.
-	ValueOf(interface{}) Value
+	ValueOf(any) Value
 
 	// InterfaceOf completely unwraps the Value to the underlying Go type.
 	// InterfaceOf panics if the input is nil or does not represent the
@@ -519,13 +513,13 @@ type ExtensionType interface {
 	//
 	// InterfaceOf is able to unwrap the Value further than Value.Interface
 	// as it has more type information available.
-	InterfaceOf(Value) interface{}
+	InterfaceOf(Value) any
 
 	// IsValidValue reports whether the Value is valid to assign to the field.
 	IsValidValue(Value) bool
 
 	// IsValidInterface reports whether the input is valid to assign to the field.
-	IsValidInterface(interface{}) bool
+	IsValidInterface(any) bool
 }
 
 // EnumDescriptor describes an enum and
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a7b0d06f..a4b78ace 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -152,7 +152,7 @@ type Message interface {
 	// This method may return nil.
 	//
 	// The returned methods type is identical to
-	// google.golang.org/protobuf/runtime/protoiface.Methods.
+	// [google.golang.org/protobuf/runtime/protoiface.Methods].
 	// Consult the protoiface package documentation for details.
 	ProtoMethods() *methods
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
deleted file mode 100644
index 7ced876f..00000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package protoreflect
-
-import "google.golang.org/protobuf/internal/pragma"
-
-type valueType int
-
-const (
-	nilType valueType = iota
-	boolType
-	int32Type
-	int64Type
-	uint32Type
-	uint64Type
-	float32Type
-	float64Type
-	stringType
-	bytesType
-	enumType
-	ifaceType
-)
-
-// value is a union where only one type can be represented at a time.
-// This uses a distinct field for each type. This is type safe in Go, but
-// occupies more memory than necessary (72B).
-type value struct {
-	pragma.DoNotCompare // 0B
-
-	typ   valueType   // 8B
-	num   uint64      // 8B
-	str   string      // 16B
-	bin   []byte      // 24B
-	iface interface{} // 16B
-}
-
-func valueOfString(v string) Value {
-	return Value{typ: stringType, str: v}
-}
-func valueOfBytes(v []byte) Value {
-	return Value{typ: bytesType, bin: v}
-}
-func valueOfIface(v interface{}) Value {
-	return Value{typ: ifaceType, iface: v}
-}
-
-func (v Value) getString() string {
-	return v.str
-}
-func (v Value) getBytes() []byte {
-	return v.bin
-}
-func (v Value) getIface() interface{} {
-	return v.iface
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index 16030973..9fe83cef 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -69,8 +69,8 @@ import (
 // composite Value. Modifying an empty, read-only value panics.
 type Value value
 
-// The protoreflect API uses a custom Value union type instead of interface{}
-// to keep the future open for performance optimizations. Using an interface{}
+// The protoreflect API uses a custom Value union type instead of any
+// to keep the future open for performance optimizations. Using an any
 // always incurs an allocation for primitives (e.g., int64) since it needs to
 // be boxed on the heap (as interfaces can only contain pointers natively).
 // Instead, we represent the Value union as a flat struct that internally keeps
@@ -85,7 +85,7 @@ type Value value
 // ValueOf returns a Value initialized with the concrete value stored in v.
 // This panics if the type does not match one of the allowed types in the
 // Value union.
-func ValueOf(v interface{}) Value {
+func ValueOf(v any) Value {
 	switch v := v.(type) {
 	case nil:
 		return Value{}
@@ -192,10 +192,10 @@ func (v Value) IsValid() bool {
 	return v.typ != nilType
 }
 
-// Interface returns v as an interface{}.
+// Interface returns v as an any.
 //
 // Invariant: v == ValueOf(v).Interface()
-func (v Value) Interface() interface{} {
+func (v Value) Interface() any {
 	switch v.typ {
 	case nilType:
 		return nil
@@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool {
 	return Value(k).IsValid()
 }
 
-// Interface returns k as an interface{}.
-func (k MapKey) Interface() interface{} {
+// Interface returns k as an any.
+func (k MapKey) Interface() any {
 	return Value(k).Interface()
 }
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index b1fdbe3e..0015fcb3 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
 
 package protoreflect
 
@@ -45,7 +44,7 @@ var (
 
 // typeOf returns a pointer to the Go type information.
 // The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
+func typeOf(t any) unsafe.Pointer {
 	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
 }
 
@@ -80,7 +79,7 @@ func valueOfBytes(v []byte) Value {
 	p := (*sliceHeader)(unsafe.Pointer(&v))
 	return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
 }
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
 	p := (*ifaceHeader)(unsafe.Pointer(&v))
 	return Value{typ: p.Type, ptr: p.Data}
 }
@@ -93,7 +92,7 @@ func (v Value) getBytes() (x []byte) {
 	*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
 	return x
 }
-func (v Value) getIface() (x interface{}) {
+func (v Value) getIface() (x any) {
 	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
 	return x
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index 43547011..479527b5 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
 
 package protoreflect
 
@@ -15,7 +14,7 @@ import (
 
 type (
 	ifaceHeader struct {
-		_    [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+		_    [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
 		Type unsafe.Pointer
 		Data unsafe.Pointer
 	}
@@ -37,7 +36,7 @@ var (
 
 // typeOf returns a pointer to the Go type information.
 // The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
+func typeOf(t any) unsafe.Pointer {
 	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
 }
 
@@ -70,7 +69,7 @@ func valueOfString(v string) Value {
 func valueOfBytes(v []byte) Value {
 	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
 }
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
 	p := (*ifaceHeader)(unsafe.Pointer(&v))
 	return Value{typ: p.Type, ptr: p.Data}
 }
@@ -81,7 +80,7 @@ func (v Value) getString() string {
 func (v Value) getBytes() []byte {
 	return unsafe.Slice((*byte)(v.ptr), v.num)
 }
-func (v Value) getIface() (x interface{}) {
+func (v Value) getIface() (x any) {
 	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
 	return x
 }
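
Note on the `interface{}` → `any` sweep above: since Go 1.18, `any` is a compile-time alias for `interface{}`, so these vendored rewrites change spelling only, not behavior. A minimal, self-contained sketch (names are illustrative, not from this repo) showing the two spellings are interchangeable:

package main

import "fmt"

// describe accepts any value; writing the parameter as `interface{}` would
// produce an identical function type.
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	var a any = 42
	var i interface{} = a // assignable both ways; the types are identical
	fmt.Println(describe(i)) // prints: int: 42
}
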
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 6267dc52..de177733 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -95,7 +95,7 @@ type Files struct {
 	// multiple files. Only top-level declarations are registered.
 	// Note that enum values are in the top-level since that are in the same
 	// scope as the parent enum.
-	descsByName map[protoreflect.FullName]interface{}
+	descsByName map[protoreflect.FullName]any
 	filesByPath map[string][]protoreflect.FileDescriptor
 	numFiles    int
 }
@@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
 		defer globalMutex.Unlock()
 	}
 	if r.descsByName == nil {
-		r.descsByName = map[protoreflect.FullName]interface{}{
+		r.descsByName = map[protoreflect.FullName]any{
 			"": &packageDescriptor{},
 		}
 		r.filesByPath = make(map[string][]protoreflect.FileDescriptor)
@@ -485,7 +485,7 @@ type Types struct {
 }
 
 type (
-	typesByName         map[protoreflect.FullName]interface{}
+	typesByName         map[protoreflect.FullName]any
 	extensionsByMessage map[protoreflect.FullName]extensionsByNumber
 	extensionsByNumber  map[protoreflect.FieldNumber]protoreflect.ExtensionType
 )
@@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error {
 	return nil
 }
 
-func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
+func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error {
 	name := desc.FullName()
 	prev := r.typesByName[name]
 	if prev != nil {
@@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p
 	}
 }
 
-func typeName(t interface{}) string {
+func typeName(t any) string {
 	switch t.(type) {
 	case protoreflect.EnumType:
 		return "enum"
@@ -854,7 +854,7 @@ func typeName(t interface{}) string {
 	}
 }
 
-func amendErrorWithCaller(err error, prev, curr interface{}) error {
+func amendErrorWithCaller(err error, prev, curr any) error {
 	prevPkg := goPackage(prev)
 	currPkg := goPackage(curr)
 	if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
@@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error {
 	return errors.New("%s\n\tpreviously from: %q\n\tcurrently from:  %q", err, prevPkg, currPkg)
 }
 
-func goPackage(v interface{}) string {
+func goPackage(v any) string {
 	switch d := v.(type) {
 	case protoreflect.EnumType:
 		v = d.Descriptor()
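
For context on the registry code above: the `typesByName` map now holds `any` values that are type-switched into enum, message, or extension types. A small usage sketch (assumptions: the global registry is populated by importing a generated package; the lookup name is only an example) of querying that registry:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // side effect: registers descriptor.proto types
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.FileDescriptorProto")
	if err != nil {
		// protoregistry.NotFound is returned when nothing is registered under that name.
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.FileDescriptorProto
}
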
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467d..28e9e9f0 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@ type Methods = struct {
 
 	// CheckInitialized returns an error if any required fields in the message are not set.
 	CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+	// Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+	Equal func(EqualInput) EqualOutput
 }
 
 // SupportFlags indicate support for optional features.
@@ -119,6 +122,22 @@ type UnmarshalInputFlags = uint8
 
 const (
 	UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
+
+	// UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer.
+	// The unmarshaller must not modify the contents of the buffer.
+	UnmarshalAliasBuffer
+
+	// UnmarshalValidated indicates that validation has already been
+	// performed on the input buffer.
+	UnmarshalValidated
+
+	// UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are
+	// initialized.
+	UnmarshalCheckRequired
+
+	// UnmarshalNoLazyDecoding is set if this unmarshal operation should not use
+	// lazy decoding, even when otherwise available.
+	UnmarshalNoLazyDecoding
 )
 
 // UnmarshalOutputFlags are output from the Unmarshal method.
@@ -166,3 +185,18 @@ type CheckInitializedInput = struct {
 type CheckInitializedOutput = struct {
 	pragma.NoUnkeyedLiterals
 }
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+	pragma.NoUnkeyedLiterals
+
+	MessageA protoreflect.Message
+	MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+	pragma.NoUnkeyedLiterals
+
+	Equal bool
+}
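
The new `Equal` entry in `Methods`, together with `EqualInput`/`EqualOutput`, gives generated code an optional fast path for message comparison. A hedged sketch of how a caller could consume it (this is not the library's implementation; `equalMessages` and its fallback are illustrative only):

package protoequal

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/runtime/protoiface"
)

// equalMessages prefers the generated Equal fast path when the Methods table
// provides one, and otherwise falls back to the reflective proto.Equal.
func equalMessages(methods *protoiface.Methods, a, b protoreflect.Message) bool {
	if methods != nil && methods.Equal != nil {
		out := methods.Equal(protoiface.EqualInput{MessageA: a, MessageB: b})
		return out.Equal
	}
	return proto.Equal(a.Interface(), b.Interface())
}
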
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
index 4a1ab7fb..93df1b56 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -15,6 +15,7 @@ import (
 	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/filetype"
 	"google.golang.org/protobuf/internal/impl"
+	"google.golang.org/protobuf/internal/protolazy"
 )
 
 // UnsafeEnabled specifies whether package unsafe can be used.
@@ -39,6 +40,9 @@ type (
 	ExtensionFieldV1 = impl.ExtensionField
 
 	Pointer = impl.Pointer
+
+	LazyUnmarshalInfo  = *protolazy.XXX_lazyUnmarshalInfo
+	RaceDetectHookData = impl.RaceDetectHookData
 )
 
 var X impl.Export
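
The regenerated descriptor.pb.go below reorders the internal `state`/`sizeCache`/`unknownFields` fields, adds the `protogen:"open.v1"` struct tag, and drops the `protoimpl.UnsafeEnabled` branches, but the exported surface (getters, Reset, ProtoReflect) is unchanged. A brief sketch, under the assumption that callers only use that exported API (field values here are arbitrary examples):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Construct a generated message and read it back through its getters.
	fd := &descriptorpb.FieldDescriptorProto{
		Name:   proto.String("id"),
		Number: proto.Int32(1),
		Type:   descriptorpb.FieldDescriptorProto_TYPE_INT64.Enum(),
	}
	fmt.Println(fd.GetName(), fd.GetNumber(), fd.GetType()) // id 1 TYPE_INT64
}
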
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 10c9030e..a5163376 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -46,6 +46,7 @@ import (
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	reflect "reflect"
 	sync "sync"
+	unsafe "unsafe"
 )
 
 // The full set of known editions.
@@ -69,7 +70,7 @@ const (
 	Edition_EDITION_2023 Edition = 1000
 	Edition_EDITION_2024 Edition = 1001
 	// Placeholder editions for testing feature resolution.  These should not be
-	// used or relyed on outside of tests.
+	// used or relied on outside of tests.
 	Edition_EDITION_1_TEST_ONLY     Edition = 1
 	Edition_EDITION_2_TEST_ONLY     Edition = 2
 	Edition_EDITION_99997_TEST_ONLY Edition = 99997
@@ -577,8 +578,6 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
 }
 
 // If set to RETENTION_SOURCE, the option will be omitted from the binary.
-// Note: as of January 2023, support for this is in progress and does not yet
-// have an effect (b/264593489).
 type FieldOptions_OptionRetention int32
 
 const (
@@ -640,8 +639,7 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) {
 
 // This indicates the types of entities that the field may apply to when used
 // as an option. If it is unset, then the field may be freely used as an
-// option on any kind of entity. Note: as of January 2023, support for this is
-// in progress and does not yet have an effect (b/264593489).
+// option on any kind of entity.
 type FieldOptions_OptionTargetType int32
 
 const (
@@ -1208,20 +1206,18 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) {
 // The protocol compiler can output a FileDescriptorSet containing the .proto
 // files it parses.
 type FileDescriptorSet struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+	state           protoimpl.MessageState `protogen:"open.v1"`
+	File            []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+	extensionFields protoimpl.ExtensionFields
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
 }
 
 func (x *FileDescriptorSet) Reset() {
 	*x = FileDescriptorSet{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FileDescriptorSet) String() string {
@@ -1232,7 +1228,7 @@ func (*FileDescriptorSet) ProtoMessage() {}
 
 func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1256,12 +1252,9 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto {
 
 // Describes a complete .proto file.
 type FileDescriptorProto struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`       // file name, relative to root of source tree
-	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
+	state   protoimpl.MessageState `protogen:"open.v1"`
+	Name    *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`       // file name, relative to root of source tree
+	Package *string                `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
 	// Names of files imported by this file.
 	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
 	// Indexes of the public imported files in the dependency list above.
@@ -1286,16 +1279,16 @@ type FileDescriptorProto struct {
 	// If `edition` is present, this value must be "editions".
 	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
 	// The edition of the proto file.
-	Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+	Edition       *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *FileDescriptorProto) Reset() {
 	*x = FileDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FileDescriptorProto) String() string {
@@ -1306,7 +1299,7 @@ func (*FileDescriptorProto) ProtoMessage() {}
 
 func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1414,10 +1407,7 @@ func (x *FileDescriptorProto) GetEdition() Edition {
 
 // Describes a message type.
 type DescriptorProto struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state          protoimpl.MessageState            `protogen:"open.v1"`
 	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
 	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
@@ -1429,16 +1419,16 @@ type DescriptorProto struct {
 	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved field names, which may not be used by fields in the same message.
 	// A given name may only be reserved once.
-	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName  []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *DescriptorProto) Reset() {
 	*x = DescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *DescriptorProto) String() string {
@@ -1449,7 +1439,7 @@ func (*DescriptorProto) ProtoMessage() {}
 
 func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1535,11 +1525,7 @@ func (x *DescriptorProto) GetReservedName() []string {
 }
 
 type ExtensionRangeOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 	// For external users: DO NOT USE. We are in the process of open sourcing
@@ -1551,7 +1537,10 @@ type ExtensionRangeOptions struct {
 	// The verification state of the range.
 	// TODO: flip the default to DECLARATION once all empty ranges
 	// are marked as UNVERIFIED.
-	Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
+	Verification    *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
+	extensionFields protoimpl.ExtensionFields
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
 }
 
 // Default values for ExtensionRangeOptions fields.
@@ -1561,11 +1550,9 @@ const (
 
 func (x *ExtensionRangeOptions) Reset() {
 	*x = ExtensionRangeOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExtensionRangeOptions) String() string {
@@ -1576,7 +1563,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {}
 
 func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1621,10 +1608,7 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica
 
 // Describes a field within a message.
 type FieldDescriptorProto struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state  protoimpl.MessageState      `protogen:"open.v1"`
 	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
 	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
@@ -1676,15 +1660,15 @@ type FieldDescriptorProto struct {
 	// Proto2 optional fields do not set this flag, because they already indicate
 	// optional with `LABEL_OPTIONAL`.
 	Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"`
+	unknownFields  protoimpl.UnknownFields
+	sizeCache      protoimpl.SizeCache
 }
 
 func (x *FieldDescriptorProto) Reset() {
 	*x = FieldDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldDescriptorProto) String() string {
@@ -1695,7 +1679,7 @@ func (*FieldDescriptorProto) ProtoMessage() {}
 
 func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1789,21 +1773,18 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool {
 
 // Describes a oneof.
 type OneofDescriptorProto struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Name          *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Options       *OneofOptions          `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
 	unknownFields protoimpl.UnknownFields
-
-	Name    *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *OneofDescriptorProto) Reset() {
 	*x = OneofDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *OneofDescriptorProto) String() string {
@@ -1814,7 +1795,7 @@ func (*OneofDescriptorProto) ProtoMessage() {}
 
 func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1845,10 +1826,7 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions {
 
 // Describes an enum type.
 type EnumDescriptorProto struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state   protoimpl.MessageState      `protogen:"open.v1"`
 	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
 	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
@@ -1858,16 +1836,16 @@ type EnumDescriptorProto struct {
 	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved enum value names, which may not be reused. A given name may only
 	// be reserved once.
-	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName  []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *EnumDescriptorProto) Reset() {
 	*x = EnumDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumDescriptorProto) String() string {
@@ -1878,7 +1856,7 @@ func (*EnumDescriptorProto) ProtoMessage() {}
 
 func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1930,22 +1908,19 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
 
 // Describes a value within an enum.
 type EnumValueDescriptorProto struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Name          *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number        *int32                 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	Options       *EnumValueOptions      `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
 	unknownFields protoimpl.UnknownFields
-
-	Name    *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Number  *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
-	Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *EnumValueDescriptorProto) Reset() {
 	*x = EnumValueDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumValueDescriptorProto) String() string {
@@ -1956,7 +1931,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {}
 
 func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1994,22 +1969,19 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
 
 // Describes a service.
 type ServiceDescriptorProto struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState   `protogen:"open.v1"`
+	Name          *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Method        []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+	Options       *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
 	unknownFields protoimpl.UnknownFields
-
-	Name    *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Method  []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
-	Options *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *ServiceDescriptorProto) Reset() {
 	*x = ServiceDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ServiceDescriptorProto) String() string {
@@ -2020,7 +1992,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {}
 
 func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2058,11 +2030,8 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions {
 
 // Describes a method of a service.
 type MethodDescriptorProto struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	state protoimpl.MessageState `protogen:"open.v1"`
+	Name  *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	// Input and output type names.  These are resolved in the same way as
 	// FieldDescriptorProto.type_name, but must refer to a message type.
 	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
@@ -2072,6 +2041,8 @@ type MethodDescriptorProto struct {
 	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
 	// Identifies if server streams multiple server messages
 	ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
 }
 
 // Default values for MethodDescriptorProto fields.
@@ -2082,11 +2053,9 @@ const (
 
 func (x *MethodDescriptorProto) Reset() {
 	*x = MethodDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *MethodDescriptorProto) String() string {
@@ -2097,7 +2066,7 @@ func (*MethodDescriptorProto) ProtoMessage() {}
 
 func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2155,11 +2124,7 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool {
 }
 
 type FileOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Sets the Java package where classes generated from this .proto will be
 	// placed.  By default, the proto package is used, but this is often
 	// inappropriate because proto packages do not normally start with backwards
@@ -2251,6 +2216,9 @@ type FileOptions struct {
 	// The parser stores options it doesn't recognize here.
 	// See the documentation for the "Options" section above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	extensionFields     protoimpl.ExtensionFields
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for FileOptions fields.
@@ -2267,11 +2235,9 @@ const (
 
 func (x *FileOptions) Reset() {
 	*x = FileOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FileOptions) String() string {
@@ -2282,7 +2248,7 @@ func (*FileOptions) ProtoMessage() {}
 
 func (x *FileOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2446,11 +2412,7 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type MessageOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Set true to use the old proto1 MessageSet wire format for extensions.
 	// This is provided for backwards-compatibility with the MessageSet wire
 	// format.  You should not use this for any other reason:  It's less
@@ -2523,6 +2485,9 @@ type MessageOptions struct {
 	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	extensionFields     protoimpl.ExtensionFields
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for MessageOptions fields.
@@ -2534,11 +2499,9 @@ const (
 
 func (x *MessageOptions) Reset() {
 	*x = MessageOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *MessageOptions) String() string {
@@ -2549,7 +2512,7 @@ func (*MessageOptions) ProtoMessage() {}
 
 func (x *MessageOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2615,17 +2578,14 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type FieldOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead.
 	// The ctype option instructs the C++ code generator to use a different
 	// representation of the field than it normally would.  See the specific
 	// options below.  This option is only implemented to support use of
 	// [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of
-	// type "bytes" in the open source release -- sorry, we'll try to include
-	// other types in a future version!
+	// type "bytes" in the open source release.
+	// TODO: make ctype actually deprecated.
 	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
 	// The packed option can be enabled for repeated primitive fields to enable
 	// a more efficient representation on the wire. Rather than repeatedly
@@ -2692,6 +2652,9 @@ type FieldOptions struct {
 	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	extensionFields     protoimpl.ExtensionFields
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for FieldOptions fields.
@@ -2707,11 +2670,9 @@ const (
 
 func (x *FieldOptions) Reset() {
 	*x = FieldOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldOptions) String() string {
@@ -2722,7 +2683,7 @@ func (*FieldOptions) ProtoMessage() {}
 
 func (x *FieldOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2836,24 +2797,21 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type OneofOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
 	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	extensionFields     protoimpl.ExtensionFields
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
 func (x *OneofOptions) Reset() {
 	*x = OneofOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *OneofOptions) String() string {
@@ -2864,7 +2822,7 @@ func (*OneofOptions) ProtoMessage() {}
 
 func (x *OneofOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2894,11 +2852,7 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type EnumOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Set this option to true to allow mapping different tag names to the same
 	// value.
 	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
@@ -2920,6 +2874,9 @@ type EnumOptions struct {
 	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	extensionFields     protoimpl.ExtensionFields
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for EnumOptions fields.
@@ -2929,11 +2886,9 @@ const (
 
 func (x *EnumOptions) Reset() {
 	*x = EnumOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumOptions) String() string {
@@ -2944,7 +2899,7 @@ func (*EnumOptions) ProtoMessage() {}
 
 func (x *EnumOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2996,11 +2951,7 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type EnumValueOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Is this enum value deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
 	// for the enum value, or it will be completely ignored; in the very least,
@@ -3012,8 +2963,13 @@ type EnumValueOptions struct {
 	// out when using debug formats, e.g. when the field contains sensitive
 	// credentials.
 	DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
+	// Information about the support window of a feature value.
+	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	extensionFields     protoimpl.ExtensionFields
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for EnumValueOptions fields.
@@ -3024,11 +2980,9 @@ const (
 
 func (x *EnumValueOptions) Reset() {
 	*x = EnumValueOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumValueOptions) String() string {
@@ -3039,7 +2993,7 @@ func (*EnumValueOptions) ProtoMessage() {}
 
 func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3075,6 +3029,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool {
 	return Default_EnumValueOptions_DebugRedact
 }
 
+func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport {
+	if x != nil {
+		return x.FeatureSupport
+	}
+	return nil
+}
+
 func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if x != nil {
 		return x.UninterpretedOption
@@ -3083,11 +3044,7 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type ServiceOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
 	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
 	// Is this service deprecated?
@@ -3097,6 +3054,9 @@ type ServiceOptions struct {
 	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	extensionFields     protoimpl.ExtensionFields
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for ServiceOptions fields.
@@ -3106,11 +3066,9 @@ const (
 
 func (x *ServiceOptions) Reset() {
 	*x = ServiceOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ServiceOptions) String() string {
@@ -3121,7 +3079,7 @@ func (*ServiceOptions) ProtoMessage() {}
 
 func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3158,11 +3116,7 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type MethodOptions struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Is this method deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
 	// for the method, or it will be completely ignored; in the very least,
@@ -3173,6 +3127,9 @@ type MethodOptions struct {
 	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	extensionFields     protoimpl.ExtensionFields
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for MethodOptions fields.
@@ -3183,11 +3140,9 @@ const (
 
 func (x *MethodOptions) Reset() {
 	*x = MethodOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *MethodOptions) String() string {
@@ -3198,7 +3153,7 @@ func (*MethodOptions) ProtoMessage() {}
 
 func (x *MethodOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3248,11 +3203,8 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
 // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
 // in them.
 type UninterpretedOption struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	state protoimpl.MessageState          `protogen:"open.v1"`
+	Name  []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
 	// The value of the uninterpreted option, in whatever type the tokenizer
 	// identified it as during parsing. Exactly one of these should be set.
 	IdentifierValue  *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
@@ -3261,15 +3213,15 @@ type UninterpretedOption struct {
 	DoubleValue      *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
 	StringValue      []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
 	AggregateValue   *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+	unknownFields    protoimpl.UnknownFields
+	sizeCache        protoimpl.SizeCache
 }
 
 func (x *UninterpretedOption) Reset() {
 	*x = UninterpretedOption{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *UninterpretedOption) String() string {
@@ -3280,7 +3232,7 @@ func (*UninterpretedOption) ProtoMessage() {}
 
 func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3351,26 +3303,23 @@ func (x *UninterpretedOption) GetAggregateValue() string {
 // be designed and implemented to handle this, hopefully before we ever hit a
 // conflict here.
 type FeatureSet struct {
-	state           protoimpl.MessageState
-	sizeCache       protoimpl.SizeCache
-	unknownFields   protoimpl.UnknownFields
-	extensionFields protoimpl.ExtensionFields
-
+	state                 protoimpl.MessageState            `protogen:"open.v1"`
 	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
 	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
 	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
 	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
 	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
 	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+	extensionFields       protoimpl.ExtensionFields
+	unknownFields         protoimpl.UnknownFields
+	sizeCache             protoimpl.SizeCache
 }
 
 func (x *FeatureSet) Reset() {
 	*x = FeatureSet{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FeatureSet) String() string {
@@ -3381,7 +3330,7 @@ func (*FeatureSet) ProtoMessage() {}
 
 func (x *FeatureSet) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3443,10 +3392,7 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
 // feature resolution. The resolution with this object becomes a simple search
 // for the closest matching edition, followed by proto merges.
 type FeatureSetDefaults struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state    protoimpl.MessageState                         `protogen:"open.v1"`
 	Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"`
 	// The minimum supported edition (inclusive) when this was constructed.
 	// Editions before this will not have defaults.
@@ -3454,15 +3400,15 @@ type FeatureSetDefaults struct {
 	// The maximum known edition (inclusive) when this was constructed. Editions
 	// after this will not have reliable defaults.
 	MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"`
+	unknownFields  protoimpl.UnknownFields
+	sizeCache      protoimpl.SizeCache
 }
 
 func (x *FeatureSetDefaults) Reset() {
 	*x = FeatureSetDefaults{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FeatureSetDefaults) String() string {
@@ -3473,7 +3419,7 @@ func (*FeatureSetDefaults) ProtoMessage() {}
 
 func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3512,10 +3458,7 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition {
 // Encapsulates information about the original source file from which a
 // FileDescriptorProto was generated.
 type SourceCodeInfo struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// A Location identifies a piece of source code in a .proto file which
 	// corresponds to a particular definition.  This information is intended
 	// to be useful to IDEs, code indexers, documentation generators, and similar
@@ -3564,16 +3507,17 @@ type SourceCodeInfo struct {
 	//   - Code which tries to interpret locations should probably be designed to
 	//     ignore those that it doesn't understand, as more types of locations could
 	//     be recorded in the future.
-	Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	Location        []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	extensionFields protoimpl.ExtensionFields
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
 }
 
 func (x *SourceCodeInfo) Reset() {
 	*x = SourceCodeInfo{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SourceCodeInfo) String() string {
@@ -3584,7 +3528,7 @@ func (*SourceCodeInfo) ProtoMessage() {}
 
 func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3610,22 +3554,19 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
 // file. A GeneratedCodeInfo message is associated with only one generated
 // source file, but may contain references to different source .proto files.
 type GeneratedCodeInfo struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// An Annotation connects some span of text in generated code to an element
 	// of its generating .proto file.
-	Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	Annotation    []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *GeneratedCodeInfo) Reset() {
 	*x = GeneratedCodeInfo{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *GeneratedCodeInfo) String() string {
@@ -3636,7 +3577,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3659,22 +3600,19 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
 }
 
 type DescriptorProto_ExtensionRange struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
+	Options       *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
 	unknownFields protoimpl.UnknownFields
-
-	Start   *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
-	End     *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
-	Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *DescriptorProto_ExtensionRange) Reset() {
 	*x = DescriptorProto_ExtensionRange{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *DescriptorProto_ExtensionRange) String() string {
@@ -3685,7 +3623,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
 
 func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3725,21 +3663,18 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
 // fields or extension ranges in the same message. Reserved ranges may
 // not overlap.
 type DescriptorProto_ReservedRange struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
 	unknownFields protoimpl.UnknownFields
-
-	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
-	End   *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *DescriptorProto_ReservedRange) Reset() {
 	*x = DescriptorProto_ReservedRange{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *DescriptorProto_ReservedRange) String() string {
@@ -3750,7 +3685,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {}
 
 func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3780,10 +3715,7 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 {
 }
 
 type ExtensionRangeOptions_Declaration struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The extension number declared within the extension range.
 	Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"`
 	// The fully-qualified name of the extension field. There must be a leading
@@ -3799,16 +3731,16 @@ type ExtensionRangeOptions_Declaration struct {
 	Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"`
 	// If true, indicates that the extension must be defined as repeated.
 	// Otherwise the extension must be defined as optional.
-	Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
+	Repeated      *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *ExtensionRangeOptions_Declaration) Reset() {
 	*x = ExtensionRangeOptions_Declaration{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExtensionRangeOptions_Declaration) String() string {
@@ -3819,7 +3751,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
 
 func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3876,21 +3808,18 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool {
 // is inclusive such that it can appropriately represent the entire int32
 // domain.
 type EnumDescriptorProto_EnumReservedRange struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Inclusive.
 	unknownFields protoimpl.UnknownFields
-
-	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
-	End   *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Inclusive.
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
 	*x = EnumDescriptorProto_EnumReservedRange{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumDescriptorProto_EnumReservedRange) String() string {
@@ -3901,7 +3830,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
 
 func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3931,21 +3860,18 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
 }
 
 type FieldOptions_EditionDefault struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Edition       *Edition               `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+	Value         *string                `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
 	unknownFields protoimpl.UnknownFields
-
-	Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
-	Value   *string  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *FieldOptions_EditionDefault) Reset() {
 	*x = FieldOptions_EditionDefault{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldOptions_EditionDefault) String() string {
@@ -3956,7 +3882,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {}
 
 func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3987,10 +3913,7 @@ func (x *FieldOptions_EditionDefault) GetValue() string {
 
 // Information about the support window of a feature.
 type FieldOptions_FeatureSupport struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The edition that this feature was first available in.  In editions
 	// earlier than this one, the default assigned to EDITION_LEGACY will be
 	// used, and proto files will not be able to override it.
@@ -4005,15 +3928,15 @@ type FieldOptions_FeatureSupport struct {
 	// this one, the last default assigned will be used, and proto files will
 	// not be able to override it.
 	EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"`
+	unknownFields  protoimpl.UnknownFields
+	sizeCache      protoimpl.SizeCache
 }
 
 func (x *FieldOptions_FeatureSupport) Reset() {
 	*x = FieldOptions_FeatureSupport{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldOptions_FeatureSupport) String() string {
@@ -4024,7 +3947,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {}
 
 func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4073,21 +3996,18 @@ func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition {
 // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents
 // "foo.(bar.baz).moo".
 type UninterpretedOption_NamePart struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	NamePart      *string                `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension   *bool                  `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
 	unknownFields protoimpl.UnknownFields
-
-	NamePart    *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
-	IsExtension *bool   `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *UninterpretedOption_NamePart) Reset() {
 	*x = UninterpretedOption_NamePart{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *UninterpretedOption_NamePart) String() string {
@@ -4098,7 +4018,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {}
 
 func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4132,24 +4052,21 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
 // the defaults at the closest matching edition ordered at or before it should
 // be used.  This field must be in strict ascending order by edition.
 type FeatureSetDefaults_FeatureSetEditionDefault struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+	state   protoimpl.MessageState `protogen:"open.v1"`
+	Edition *Edition               `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
 	// Defaults of features that can be overridden in this edition.
 	OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"`
 	// Defaults of features that can't be overridden in this edition.
 	FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
 	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
@@ -4160,7 +4077,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4197,10 +4114,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *Featur
 }
 
 type SourceCodeInfo_Location struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Identifies which part of the FileDescriptorProto was defined at this
 	// location.
 	//
@@ -4292,15 +4206,15 @@ type SourceCodeInfo_Location struct {
 	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
 	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
 	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
 }
 
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SourceCodeInfo_Location) String() string {
@@ -4311,7 +4225,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4362,10 +4276,7 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
 }
 
 type GeneratedCodeInfo_Annotation struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Identifies the element in the original source .proto file. This field
 	// is formatted the same as SourceCodeInfo.Location.path.
 	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
@@ -4377,17 +4288,17 @@ type GeneratedCodeInfo_Annotation struct {
 	// Identifies the ending offset in bytes in the generated code that
 	// relates to the identified object. The end offset should be one past
 	// the last relevant byte (so the length of the text = end - begin).
-	End      *int32                                 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
-	Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
+	End           *int32                                 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+	Semantic      *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *GeneratedCodeInfo_Annotation) String() string {
@@ -4398,7 +4309,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4450,476 +4361,324 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
 
 var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_descriptor_proto_rawDesc = []byte{
+var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
 	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
 	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
 	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
 	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
 	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
-	0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
-	0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
-	0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65,
-	0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c,
-	0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20,
-	0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e,
-	0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65,
-	0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e,
-	0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43,
-	0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54,
-	0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
-	0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e,
-	0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
-	0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
-	0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74,
-	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36,
-	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
-	0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
-	0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
-	0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69,
-	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69,
-	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06,
-	0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63,
-	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65,
-	0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
-	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78,
-	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65,
-	0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a,
-	0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e,
-	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a,
-	0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
-	0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
-	0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
-	0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
-	0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
-	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a,
-	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
-	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
-	0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
-	0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
-	0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
-	0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
-	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
-	0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40,
-	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
-	0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
-	0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78,
-	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
-	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
-	0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
-	0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61,
-	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63,
-	0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
-	0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
-	0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
-	0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88,
-	0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c,
-	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c,
-	0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73,
-	0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73,
-	0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
-	0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
-	0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66,
-	0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b,
-	0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a,
-	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08,
-	0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65,
+	0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
+	0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
+	0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
+	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
+	0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
+	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
+	0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28,
+	0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
+	0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65,
+	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65,
+	0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c,
+	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70,
+	0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d,
+	0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
+	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07,
+	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
 	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
-	0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a,
-	0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c,
-	0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
-	0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a,
-	0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66,
-	0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f,
-	0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12,
-	0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
+	0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6,
-	0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
-	0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
-	0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45,
-	0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a,
-	0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b,
-	0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a,
-	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a,
-	0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12,
-	0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12,
-	0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d,
-	0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12,
-	0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32,
-	0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45,
-	0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49,
-	0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
-	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c,
-	0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e,
-	0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45,
-	0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45,
-	0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14,
-	0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
-	0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
-	0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36,
-	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36,
+	0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63,
+	0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
 	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
-	0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
-	0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
-	0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e,
-	0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
+	0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
+	0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
+	0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
+	0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
+	0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f,
+	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+	0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
+	0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65,
+	0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d,
+	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e,
+	0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65,
+	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+	0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64,
+	0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
+	0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+	0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
+	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d,
+	0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a,
+	0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a,
+	0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61,
+	0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
+	0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
+	0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37,
+	0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
 	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
 	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d,
-	0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
-	0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
-	0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01,
-	0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06,
-	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
-	0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68,
-	0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74,
-	0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74,
-	0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74,
-	0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75,
-	0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
-	0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
-	0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65,
-	0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
-	0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b,
-	0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50,
-	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f,
-	0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43,
-	0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61,
-	0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18,
-	0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61,
-	0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12,
-	0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
-	0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68,
-	0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61,
-	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e,
-	0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74,
-	0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18,
-	0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61,
-	0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66,
-	0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f,
-	0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f,
-	0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d,
-	0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63,
-	0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61,
-	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65,
-	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e,
-	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15,
-	0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72,
-	0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
-	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65,
-	0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47,
-	0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25,
-	0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
-	0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62,
-	0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a,
-	0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41,
-	0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c,
-	0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69,
-	0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
-	0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68,
-	0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
-	0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
-	0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65,
-	0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c,
-	0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70,
-	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34,
-	0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
-	0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73,
-	0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63,
-	0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79,
-	0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70,
-	0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50,
-	0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49,
-	0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e,
-	0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
-	0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03,
-	0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f,
-	0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
-	0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c,
-	0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65,
-	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f,
-	0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c,
-	0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a,
-	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79,
-	0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79,
-	0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c,
-	0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08,
-	0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
-	0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43,
-	0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
+	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
 	0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
 	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
 	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
 	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
 	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05,
-	0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04,
-	0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e,
-	0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b,
-	0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64,
-	0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
-	0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41,
-	0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a,
-	0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04,
-	0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
-	0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
-	0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
-	0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
-	0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
-	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77,
-	0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f,
-	0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74,
-	0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20,
-	0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a,
-	0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07,
-	0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28,
-	0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52,
-	0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73,
-	0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52,
-	0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
-	0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
-	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07,
-	0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e,
+	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64,
+	0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
+	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+	0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
+	0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65,
+	0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a,
+	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02,
+	0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94,
+	0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16,
+	0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06,
+	0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e,
+	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e,
+	0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
+	0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
+	0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18,
+	0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a,
+	0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45,
+	0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
+	0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07,
+	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+	0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c,
+	0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e,
+	0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b,
+	0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65,
+	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
+	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75,
+	0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+	0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
+	0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28,
+	0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a,
+	0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a,
+	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f,
+	0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
+	0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
+	0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
+	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
+	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54,
+	0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
+	0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
+	0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54,
+	0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a,
+	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a,
+	0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d,
+	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a,
+	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f,
+	0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36,
+	0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54,
+	0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e,
+	0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
+	0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c,
+	0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45,
+	0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
+	0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e,
+	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
+	0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
+	0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69,
-	0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52,
-	0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63,
-	0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65,
-	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64,
-	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69,
-	0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f,
-	0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18,
-	0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52,
-	0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22,
-	0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49,
-	0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10,
-	0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02,
-	0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53,
-	0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f,
-	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
-	0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45,
-	0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
-	0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52,
-	0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45,
-	0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c,
-	0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54,
-	0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10,
-	0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45,
-	0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47,
-	0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11,
-	0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c,
-	0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41,
-	0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06,
-	0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13,
-	0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
-	0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f,
-	0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08,
-	0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04,
-	0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+	0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
+	0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61,
+	0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
+	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65,
+	0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d,
+	0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
+	0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
+	0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b,
+	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16,
+	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65,
+	0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
+	0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
+	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+	0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79,
+	0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
+	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54,
+	0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a,
+	0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
+	0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f,
+	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
+	0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
+	0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+	0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
+	0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63,
+	0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74,
+	0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61,
+	0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d,
+	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20,
+	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61,
+	0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a,
+	0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
+	0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14,
+	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65,
+	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48,
+	0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69,
+	0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20,
+	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61,
+	0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12,
+	0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18,
+	0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65,
+	0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
+	0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
+	0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b,
+	0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
+	0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
+	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72,
+	0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61,
+	0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+	0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+	0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01,
+	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e,
+	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
+	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
+	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+	0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
+	0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
+	0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
+	0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
+	0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
+	0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
+	0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
+	0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
+	0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
+	0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
+	0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
+	0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
+	0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
+	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+	0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
+	0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
+	0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
+	0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+	0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
+	0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
+	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
+	0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
 	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
 	0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
 	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
@@ -4927,307 +4686,468 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
 	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
 	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
-	0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69,
-	0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41,
-	0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
-	0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
-	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64,
-	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79,
-	0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
-	0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52,
-	0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63,
-	0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69,
-	0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
-	0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
-	0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14,
-	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
-	0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
-	0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d,
-	0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a,
-	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
-	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
-	0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c,
-	0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67,
-	0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
-	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
-	0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e,
-	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37,
-	0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
-	0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58,
-	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
-	0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11,
-	0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65,
-	0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65,
-	0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
-	0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69,
-	0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
-	0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08,
-	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
-	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
-	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
-	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63,
-	0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
-	0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
-	0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43,
-	0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
-	0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22,
-	0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
-	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65,
-	0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64,
-	0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
-	0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
-	0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
-	0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61,
-	0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
-	0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52,
-	0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
-	0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69,
-	0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65,
-	0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09,
-	0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52,
-	0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f,
-	0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52,
-	0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x0a, 0x0a,
-	0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e,
-	0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
-	0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42,
-	0x3f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
-	0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49,
-	0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
-	0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07,
-	0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12,
-	0x6c, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e,
-	0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06,
-	0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6,
-	0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
-	0x08, 0xe8, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01,
-	0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32,
-	0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70,
-	0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69,
-	0x6e, 0x67, 0x42, 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d,
-	0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b,
-	0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8,
-	0x07, 0x52, 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38,
-	0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
-	0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55,
-	0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88,
-	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e,
-	0x45, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18,
-	0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61,
-	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73,
-	0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e,
-	0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42,
-	0x26, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c,
-	0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6,
-	0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
-	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f,
-	0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69,
+	0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45,
+	0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45,
+	0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49,
+	0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
+	0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70,
+	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+	0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
+	0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65,
+	0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d,
+	0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72,
+	0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63,
+	0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+	0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
+	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03,
+	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
+	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65,
+	0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+	0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
+	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b,
+	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+	0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08,
+	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
 	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e,
-	0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01,
-	0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f,
-	0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01,
-	0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8,
-	0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a,
-	0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a,
-	0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58,
-	0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c,
-	0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59,
-	0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45,
-	0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f,
-	0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08,
-	0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53,
-	0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
-	0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a,
-	0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f,
-	0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
-	0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c,
-	0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e,
-	0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b,
-	0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f,
-	0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56,
-	0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10,
-	0x03, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f,
-	0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f,
-	0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
-	0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45,
-	0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d,
-	0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f,
-	0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52,
-	0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a,
-	0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41,
-	0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02,
-	0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07,
-	0x2a, 0x06, 0x08, 0xea, 0x07, 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x86, 0x4e, 0x10, 0x87, 0x4e,
-	0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e,
-	0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12,
-	0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
-	0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65,
-	0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
-	0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52,
-	0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e,
-	0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
+	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
+	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
+	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
+	0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08,
+	0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65,
+	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79,
+	0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53,
+	0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
+	0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61,
+	0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
+	0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a,
+	0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+	0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65,
+	0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
+	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69,
+	0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
+	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
+	0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+	0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
+	0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
+	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
+	0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74,
+	0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
+	0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
+	0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
+	0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65,
+	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18,
+	0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
+	0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a,
+	0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
+	0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
+	0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
+	0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a,
+	0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+	0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a,
+	0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75,
+	0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
+	0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72,
+	0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
+	0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
 	0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69,
-	0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f,
-	0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52,
-	0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a,
-	0xe2, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07,
-	0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64,
+	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
+	0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77,
+	0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65,
+	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
+	0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f,
+	0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
+	0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
+	0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
+	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44,
+	0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45,
+	0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
+	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
+	0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
+	0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15,
+	0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
+	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49,
+	0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10,
+	0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
+	0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72,
+	0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
+	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
+	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
+	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
+	0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
+	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03,
+	0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45,
+	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14,
+	0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
+	0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54,
+	0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07,
+	0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52,
+	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10,
+	0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
+	0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65,
+	0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+	0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+	0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
+	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
+	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+	0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c,
+	0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
+	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
+	0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
+	0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+	0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
+	0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
+	0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
+	0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
+	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
+	0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10,
+	0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
+	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+	0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64,
+	0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
+	0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
+	0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
+	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69,
+	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+	0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+	0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
+	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
+	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
+	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99,
+	0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21,
+	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
+	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70,
+	0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
+	0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
+	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f,
+	0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a,
+	0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65,
+	0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
+	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f,
+	0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
+	0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a,
+	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55,
+	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52,
+	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
+	0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
+	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f,
+	0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c,
+	0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61,
+	0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+	0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01,
+	0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+	0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67,
+	0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e,
+	0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
+	0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65,
+	0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64,
+	0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+	0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65,
+	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98,
+	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
+	0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43,
+	0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
+	0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e,
+	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f,
-	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54,
+	0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01,
+	0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12,
+	0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08,
+	0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70,
+	0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
+	0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88,
+	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
+	0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43,
+	0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65,
+	0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64,
+	0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69,
+	0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61,
+	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04,
+	0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2,
+	0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
+	0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
+	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+	0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98,
+	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48,
+	0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08,
+	0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64,
+	0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
+	0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61,
+	0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2,
+	0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f,
+	0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c,
+	0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73,
+	0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
+	0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
+	0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
+	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
+	0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+	0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
+	0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
+	0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
+	0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
+	0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
+	0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
+	0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
+	0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
+	0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
+	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
+	0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
+	0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
+	0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01,
+	0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
+	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
+	0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
+	0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
+	0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49,
+	0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
+	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f,
+	0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09,
+	0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47,
+	0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10,
+	0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
+	0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8,
+	0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+	0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f,
+	0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+	0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64,
+	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
+	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64,
+	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+	0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
 	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65,
-	0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43,
-	0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72,
-	0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01,
-	0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61,
-	0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61,
-	0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05,
-	0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65,
-	0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d,
-	0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e,
-	0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
-	0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65,
-	0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18,
-	0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65,
-	0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0,
-	0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65,
-	0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72,
-	0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e,
-	0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05,
-	0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f,
-	0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62,
-	0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69,
-	0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03,
-	0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18,
-	0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
+	0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+	0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65,
+	0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
+	0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
+	0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c,
+	0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78,
+	0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d,
+	0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08,
+	0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
+	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
+	0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
+	0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
+	0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
+	0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
+	0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
+	0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
+	0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
+	0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
+	0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
+	0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
+	0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08,
+	0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11,
+	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
+	0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
 	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
 	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73,
-	0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e,
-	0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a,
-	0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10,
-	0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a,
-	0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
-	0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45,
-	0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49,
-	0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e,
-	0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+	0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10,
+	0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
+	0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69,
+	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10,
+	0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64,
+	0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
+	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61,
+	0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
+	0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
+	0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7,
+	0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44,
+	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+	0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43,
+	0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
+	0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49,
+	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11,
+	0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8,
 	0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32,
-	0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49,
-	0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01,
-	0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45,
-	0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49,
-	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
-	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54,
-	0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f,
-	0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49,
-	0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e,
-	0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
-	0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63,
-	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
-	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02,
-	0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-}
+	0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
+	0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a,
+	0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
+	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
+	0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
+	0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
+	0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
+	0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
+	0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
+	0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
+	0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
+	0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+	0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
+	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+	0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+	0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
+	0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+})
 
 var (
 	file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
-	file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc
+	file_google_protobuf_descriptor_proto_rawDescData []byte
 )
 
 func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
 	file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData)
+		file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)))
 	})
 	return file_google_protobuf_descriptor_proto_rawDescData
 }
 
 var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
 var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
-var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
+var file_google_protobuf_descriptor_proto_goTypes = []any{
 	(Edition)(0), // 0: google.protobuf.Edition
 	(ExtensionRangeOptions_VerificationState)(0),        // 1: google.protobuf.ExtensionRangeOptions.VerificationState
 	(FieldDescriptorProto_Type)(0),                      // 2: google.protobuf.FieldDescriptorProto.Type
@@ -5329,38 +5249,39 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{
 	36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
 	35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
 	36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 49: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 50: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 51: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	9,  // 52: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
-	36, // 53: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 54: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	46, // 55: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
-	10, // 56: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
-	11, // 57: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
-	12, // 58: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
-	13, // 59: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
-	14, // 60: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
-	15, // 61: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
-	47, // 62: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	0,  // 63: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
-	0,  // 64: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
-	48, // 65: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
-	49, // 66: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
-	20, // 67: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
-	0,  // 68: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
-	0,  // 69: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
-	0,  // 70: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
-	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
-	0,  // 72: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
-	36, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
-	36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
-	16, // 75: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	76, // [76:76] is the sub-list for method output_type
-	76, // [76:76] is the sub-list for method input_type
-	76, // [76:76] is the sub-list for extension type_name
-	76, // [76:76] is the sub-list for extension extendee
-	0,  // [0:76] is the sub-list for field type_name
+	45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	9,  // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+	36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+	10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+	11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+	12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+	13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+	14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+	15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+	47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	0,  // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+	0,  // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+	48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+	49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+	20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+	0,  // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+	0,  // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+	0,  // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+	36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+	36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+	16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	77, // [77:77] is the sub-list for method output_type
+	77, // [77:77] is the sub-list for method input_type
+	77, // [77:77] is the sub-list for extension type_name
+	77, // [77:77] is the sub-list for extension extendee
+	0,  // [0:77] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5368,429 +5289,11 @@ func file_google_protobuf_descriptor_proto_init() {
 	if File_google_protobuf_descriptor_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FileDescriptorSet); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FileDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*DescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExtensionRangeOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FieldDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*OneofDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*EnumDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*EnumValueDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ServiceDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*MethodDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FileOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*MessageOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FieldOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*OneofOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*EnumOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*EnumValueOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ServiceOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*MethodOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*UninterpretedOption); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FeatureSet); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FeatureSetDefaults); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SourceCodeInfo); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*GeneratedCodeInfo); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*DescriptorProto_ExtensionRange); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*DescriptorProto_ReservedRange); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExtensionRangeOptions_Declaration); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FieldOptions_EditionDefault); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FieldOptions_FeatureSupport); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*UninterpretedOption_NamePart); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SourceCodeInfo_Location); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*GeneratedCodeInfo_Annotation); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
 			NumEnums:      17,
 			NumMessages:   33,
 			NumExtensions: 0,
@@ -5802,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() {
 		MessageInfos:      file_google_protobuf_descriptor_proto_msgTypes,
 	}.Build()
 	File_google_protobuf_descriptor_proto = out.File
-	file_google_protobuf_descriptor_proto_rawDesc = nil
 	file_google_protobuf_descriptor_proto_goTypes = nil
 	file_google_protobuf_descriptor_proto_depIdxs = nil
 }
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index b0df3fb3..28d24bad 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -16,24 +16,153 @@ import (
 	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
 	reflect "reflect"
 	sync "sync"
+	unsafe "unsafe"
 )
 
-type GoFeatures struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
+type GoFeatures_APILevel int32
 
+const (
+	// API_LEVEL_UNSPECIFIED results in selecting the OPEN API,
+	// but needs to be a separate value to distinguish between
+	// an explicitly set api level or a missing api level.
+	GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0
+	GoFeatures_API_OPEN              GoFeatures_APILevel = 1
+	GoFeatures_API_HYBRID            GoFeatures_APILevel = 2
+	GoFeatures_API_OPAQUE            GoFeatures_APILevel = 3
+)
+
+// Enum value maps for GoFeatures_APILevel.
+var (
+	GoFeatures_APILevel_name = map[int32]string{
+		0: "API_LEVEL_UNSPECIFIED",
+		1: "API_OPEN",
+		2: "API_HYBRID",
+		3: "API_OPAQUE",
+	}
+	GoFeatures_APILevel_value = map[string]int32{
+		"API_LEVEL_UNSPECIFIED": 0,
+		"API_OPEN":              1,
+		"API_HYBRID":            2,
+		"API_OPAQUE":            3,
+	}
+)
+
+func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel {
+	p := new(GoFeatures_APILevel)
+	*p = x
+	return p
+}
+
+func (x GoFeatures_APILevel) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor()
+}
+
+func (GoFeatures_APILevel) Type() protoreflect.EnumType {
+	return &file_google_protobuf_go_features_proto_enumTypes[0]
+}
+
+func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = GoFeatures_APILevel(num)
+	return nil
+}
+
+// Deprecated: Use GoFeatures_APILevel.Descriptor instead.
+func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type GoFeatures_StripEnumPrefix int32
+
+const (
+	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED   GoFeatures_StripEnumPrefix = 0
+	GoFeatures_STRIP_ENUM_PREFIX_KEEP          GoFeatures_StripEnumPrefix = 1
+	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2
+	GoFeatures_STRIP_ENUM_PREFIX_STRIP         GoFeatures_StripEnumPrefix = 3
+)
+
+// Enum value maps for GoFeatures_StripEnumPrefix.
+var (
+	GoFeatures_StripEnumPrefix_name = map[int32]string{
+		0: "STRIP_ENUM_PREFIX_UNSPECIFIED",
+		1: "STRIP_ENUM_PREFIX_KEEP",
+		2: "STRIP_ENUM_PREFIX_GENERATE_BOTH",
+		3: "STRIP_ENUM_PREFIX_STRIP",
+	}
+	GoFeatures_StripEnumPrefix_value = map[string]int32{
+		"STRIP_ENUM_PREFIX_UNSPECIFIED":   0,
+		"STRIP_ENUM_PREFIX_KEEP":          1,
+		"STRIP_ENUM_PREFIX_GENERATE_BOTH": 2,
+		"STRIP_ENUM_PREFIX_STRIP":         3,
+	}
+)
+
+func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix {
+	p := new(GoFeatures_StripEnumPrefix)
+	*p = x
+	return p
+}
+
+func (x GoFeatures_StripEnumPrefix) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor()
+}
+
+func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType {
+	return &file_google_protobuf_go_features_proto_enumTypes[1]
+}
+
+func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = GoFeatures_StripEnumPrefix(num)
+	return nil
+}
+
+// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead.
+func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1}
+}
+
+type GoFeatures struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Whether or not to generate the deprecated UnmarshalJSON method for enums.
+	// Can only be true for proto using the Open Struct api.
 	LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"`
+	// One of OPEN, HYBRID or OPAQUE.
+	ApiLevel        *GoFeatures_APILevel        `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"`
+	StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
 }
 
 func (x *GoFeatures) Reset() {
 	*x = GoFeatures{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_go_features_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_go_features_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *GoFeatures) String() string {
@@ -44,7 +173,7 @@ func (*GoFeatures) ProtoMessage() {}
 
 func (x *GoFeatures) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_go_features_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -66,6 +195,20 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
 	return false
 }
 
+func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel {
+	if x != nil && x.ApiLevel != nil {
+		return *x.ApiLevel
+	}
+	return GoFeatures_API_LEVEL_UNSPECIFIED
+}
+
+func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix {
+	if x != nil && x.StripEnumPrefix != nil {
+		return *x.StripEnumPrefix
+	}
+	return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED
+}
+
 var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
 	{
 		ExtendedType:  (*descriptorpb.FeatureSet)(nil),
@@ -85,59 +228,94 @@ var (
 
 var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_go_features_proto_rawDesc = []byte{
+var file_google_protobuf_go_features_proto_rawDesc = string([]byte{
 	0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x47, 0x6f,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
+	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
 	0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
-	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x7d, 0x88,
-	0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0x84,
-	0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0xb2, 0x01,
-	0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x65, 0x67,
-	0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x53, 0x4f,
-	0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20,
-	0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74,
-	0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65,
-	0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x73, 0x6f,
-	0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52,
-	0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f,
-	0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x70, 0x62,
-}
+	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
+	0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72,
+	0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18,
+	0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65,
+	0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
+	0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70,
+	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c,
+	0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
+	0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+	0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
+	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69,
+	0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70,
+	0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49,
+	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01,
+	0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
+	0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f,
+	0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2,
+	0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
+	0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72,
+	0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e,
+	0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70,
+	0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98,
+	0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52,
+	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
+	0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74,
+	0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a,
+	0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49,
+	0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
+	0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e,
+	0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44,
+	0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45,
+	0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d,
+	0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f,
+	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50,
+	0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52,
+	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
+	0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45,
+	0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52,
+	0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54,
+	0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f,
+	0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
+	0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
+})
 
 var (
 	file_google_protobuf_go_features_proto_rawDescOnce sync.Once
-	file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc
+	file_google_protobuf_go_features_proto_rawDescData []byte
 )
 
 func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
 	file_google_protobuf_go_features_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData)
+		file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)))
 	})
 	return file_google_protobuf_go_features_proto_rawDescData
 }
 
+var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
 var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_go_features_proto_goTypes = []interface{}{
-	(*GoFeatures)(nil),              // 0: pb.GoFeatures
-	(*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet
+var file_google_protobuf_go_features_proto_goTypes = []any{
+	(GoFeatures_APILevel)(0),        // 0: pb.GoFeatures.APILevel
+	(GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix
+	(*GoFeatures)(nil),              // 2: pb.GoFeatures
+	(*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet
 }
 var file_google_protobuf_go_features_proto_depIdxs = []int32{
-	1, // 0: pb.go:extendee -> google.protobuf.FeatureSet
-	0, // 1: pb.go:type_name -> pb.GoFeatures
-	2, // [2:2] is the sub-list for method output_type
-	2, // [2:2] is the sub-list for method input_type
-	1, // [1:2] is the sub-list for extension type_name
-	0, // [0:1] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
+	0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel
+	1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix
+	3, // 2: pb.go:extendee -> google.protobuf.FeatureSet
+	2, // 3: pb.go:type_name -> pb.GoFeatures
+	4, // [4:4] is the sub-list for method output_type
+	4, // [4:4] is the sub-list for method input_type
+	3, // [3:4] is the sub-list for extension type_name
+	2, // [2:3] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_go_features_proto_init() }
@@ -145,37 +323,23 @@ func file_google_protobuf_go_features_proto_init() {
 	if File_google_protobuf_go_features_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*GoFeatures); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_go_features_proto_rawDesc,
-			NumEnums:      0,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)),
+			NumEnums:      2,
 			NumMessages:   1,
 			NumExtensions: 1,
 			NumServices:   0,
 		},
 		GoTypes:           file_google_protobuf_go_features_proto_goTypes,
 		DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs,
+		EnumInfos:         file_google_protobuf_go_features_proto_enumTypes,
 		MessageInfos:      file_google_protobuf_go_features_proto_msgTypes,
 		ExtensionInfos:    file_google_protobuf_go_features_proto_extTypes,
 	}.Build()
 	File_google_protobuf_go_features_proto = out.File
-	file_google_protobuf_go_features_proto_rawDesc = nil
 	file_google_protobuf_go_features_proto_goTypes = nil
 	file_google_protobuf_go_features_proto_depIdxs = nil
 }
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 9de51be5..497da66e 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -122,6 +122,7 @@ import (
 	reflect "reflect"
 	strings "strings"
 	sync "sync"
+	unsafe "unsafe"
 )
 
 // `Any` contains an arbitrary serialized protocol buffer message along with a
@@ -210,10 +211,7 @@ import (
 //	  "value": "1.212s"
 //	}
 type Any struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// A URL/resource name that uniquely identifies the type of the serialized
 	// protocol buffer message. This string must contain at least
 	// one "/" character. The last segment of the URL's path must represent
@@ -244,7 +242,9 @@ type Any struct {
 	// used with implementation specific semantics.
 	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
 	// Must be a valid serialized protocol buffer of the above specified type.
-	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	Value         []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // New marshals src into a new Any instance.
@@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) {
 
 func (x *Any) Reset() {
 	*x = Any{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_any_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_any_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Any) String() string {
@@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {}
 
 func (x *Any) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_any_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -414,7 +412,7 @@ func (x *Any) GetValue() []byte {
 
 var File_google_protobuf_any_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_any_proto_rawDesc = []byte{
+var file_google_protobuf_any_proto_rawDesc = string([]byte{
 	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
 	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
@@ -430,22 +428,22 @@ var file_google_protobuf_any_proto_rawDesc = []byte{
 	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
 	0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x33,
-}
+})
 
 var (
 	file_google_protobuf_any_proto_rawDescOnce sync.Once
-	file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc
+	file_google_protobuf_any_proto_rawDescData []byte
 )
 
 func file_google_protobuf_any_proto_rawDescGZIP() []byte {
 	file_google_protobuf_any_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData)
+		file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)))
 	})
 	return file_google_protobuf_any_proto_rawDescData
 }
 
 var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_any_proto_goTypes = []interface{}{
+var file_google_protobuf_any_proto_goTypes = []any{
 	(*Any)(nil), // 0: google.protobuf.Any
 }
 var file_google_protobuf_any_proto_depIdxs = []int32{
@@ -461,25 +459,11 @@ func file_google_protobuf_any_proto_init() {
 	if File_google_protobuf_any_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Any); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_any_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)),
 			NumEnums:      0,
 			NumMessages:   1,
 			NumExtensions: 0,
@@ -490,7 +474,6 @@ func file_google_protobuf_any_proto_init() {
 		MessageInfos:      file_google_protobuf_any_proto_msgTypes,
 	}.Build()
 	File_google_protobuf_any_proto = out.File
-	file_google_protobuf_any_proto_rawDesc = nil
 	file_google_protobuf_any_proto_goTypes = nil
 	file_google_protobuf_any_proto_depIdxs = nil
 }
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index df709a8d..193880d1 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -80,6 +80,7 @@ import (
 	reflect "reflect"
 	sync "sync"
 	time "time"
+	unsafe "unsafe"
 )
 
 // A Duration represents a signed, fixed-length span of time represented
@@ -141,10 +142,7 @@ import (
 // be expressed in JSON format as "3.000000001s", and 3 seconds and 1
 // microsecond should be expressed in JSON format as "3.000001s".
 type Duration struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Signed seconds of the span of time. Must be from -315,576,000,000
 	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
 	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
@@ -155,7 +153,9 @@ type Duration struct {
 	// of one second or more, a non-zero value for the `nanos` field must be
 	// of the same sign as the `seconds` field. Must be from -999,999,999
 	// to +999,999,999 inclusive.
-	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // New constructs a new Duration from the provided time.Duration.
@@ -245,11 +245,9 @@ func (x *Duration) check() uint {
 
 func (x *Duration) Reset() {
 	*x = Duration{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_duration_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_duration_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Duration) String() string {
@@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {}
 
 func (x *Duration) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_duration_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -291,7 +289,7 @@ func (x *Duration) GetNanos() int32 {
 
 var File_google_protobuf_duration_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_duration_proto_rawDesc = []byte{
+var file_google_protobuf_duration_proto_rawDesc = string([]byte{
 	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
 	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
@@ -308,22 +306,22 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{
 	0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
 	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
 	0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
 
 var (
 	file_google_protobuf_duration_proto_rawDescOnce sync.Once
-	file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc
+	file_google_protobuf_duration_proto_rawDescData []byte
 )
 
 func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
 	file_google_protobuf_duration_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData)
+		file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)))
 	})
 	return file_google_protobuf_duration_proto_rawDescData
 }
 
 var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_duration_proto_goTypes = []interface{}{
+var file_google_protobuf_duration_proto_goTypes = []any{
 	(*Duration)(nil), // 0: google.protobuf.Duration
 }
 var file_google_protobuf_duration_proto_depIdxs = []int32{
@@ -339,25 +337,11 @@ func file_google_protobuf_duration_proto_init() {
 	if File_google_protobuf_duration_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Duration); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_duration_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)),
 			NumEnums:      0,
 			NumMessages:   1,
 			NumExtensions: 0,
@@ -368,7 +352,6 @@ func file_google_protobuf_duration_proto_init() {
 		MessageInfos:      file_google_protobuf_duration_proto_msgTypes,
 	}.Build()
 	File_google_protobuf_duration_proto = out.File
-	file_google_protobuf_duration_proto_rawDesc = nil
 	file_google_protobuf_duration_proto_goTypes = nil
 	file_google_protobuf_duration_proto_depIdxs = nil
 }
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index e8789cb3..041feb0f 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -83,6 +83,7 @@ import (
 	sort "sort"
 	strings "strings"
 	sync "sync"
+	unsafe "unsafe"
 )
 
 // `FieldMask` represents a set of symbolic field paths, for example:
@@ -284,12 +285,11 @@ import (
 // request should verify the included field paths, and return an
 // `INVALID_ARGUMENT` error if any path is unmappable.
 type FieldMask struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The set of field mask paths.
-	Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+	Paths         []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // New constructs a field mask from a list of paths and verifies that
@@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool {
 
 func (x *FieldMask) Reset() {
 	*x = FieldMask{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldMask) String() string {
@@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {}
 
 func (x *FieldMask) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -506,7 +504,7 @@ func (x *FieldMask) GetPaths() []string {
 
 var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_field_mask_proto_rawDesc = []byte{
+var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{
 	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
 	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
@@ -522,22 +520,22 @@ var file_google_protobuf_field_mask_proto_rawDesc = []byte{
 	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
 	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
 
 var (
 	file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
-	file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
+	file_google_protobuf_field_mask_proto_rawDescData []byte
 )
 
 func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
 	file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
+		file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)))
 	})
 	return file_google_protobuf_field_mask_proto_rawDescData
 }
 
 var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
+var file_google_protobuf_field_mask_proto_goTypes = []any{
 	(*FieldMask)(nil), // 0: google.protobuf.FieldMask
 }
 var file_google_protobuf_field_mask_proto_depIdxs = []int32{
@@ -553,25 +551,11 @@ func file_google_protobuf_field_mask_proto_init() {
 	if File_google_protobuf_field_mask_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FieldMask); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)),
 			NumEnums:      0,
 			NumMessages:   1,
 			NumExtensions: 0,
@@ -582,7 +566,6 @@ func file_google_protobuf_field_mask_proto_init() {
 		MessageInfos:      file_google_protobuf_field_mask_proto_msgTypes,
 	}.Build()
 	File_google_protobuf_field_mask_proto = out.File
-	file_google_protobuf_field_mask_proto_rawDesc = nil
 	file_google_protobuf_field_mask_proto_goTypes = nil
 	file_google_protobuf_field_mask_proto_depIdxs = nil
 }
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index d2bac8b8..ecdd31ab 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -49,11 +49,11 @@
 // The standard Go "encoding/json" package has functionality to serialize
 // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
 // ListValue.AsSlice methods can convert the protobuf message representation into
-// a form represented by interface{}, map[string]interface{}, and []interface{}.
+// a form represented by any, map[string]any, and []any.
 // This form can be used with other packages that operate on such data structures
 // and also directly with the standard json package.
 //
-// In order to convert the interface{}, map[string]interface{}, and []interface{}
+// In order to convert the any, map[string]any, and []any
 // forms back as Value, Struct, and ListValue messages, use the NewStruct,
 // NewList, and NewValue constructor functions.
 //
@@ -88,28 +88,28 @@
 //
 // To construct a Value message representing the above JSON object:
 //
-//	m, err := structpb.NewValue(map[string]interface{}{
+//	m, err := structpb.NewValue(map[string]any{
 //		"firstName": "John",
 //		"lastName":  "Smith",
 //		"isAlive":   true,
 //		"age":       27,
-//		"address": map[string]interface{}{
+//		"address": map[string]any{
 //			"streetAddress": "21 2nd Street",
 //			"city":          "New York",
 //			"state":         "NY",
 //			"postalCode":    "10021-3100",
 //		},
-//		"phoneNumbers": []interface{}{
-//			map[string]interface{}{
+//		"phoneNumbers": []any{
+//			map[string]any{
 //				"type":   "home",
 //				"number": "212 555-1234",
 //			},
-//			map[string]interface{}{
+//			map[string]any{
 //				"type":   "office",
 //				"number": "646 555-4567",
 //			},
 //		},
-//		"children": []interface{}{},
+//		"children": []any{},
 //		"spouse":   nil,
 //	})
 //	if err != nil {
@@ -120,6 +120,7 @@ package structpb
 
 import (
 	base64 "encoding/base64"
+	json "encoding/json"
 	protojson "google.golang.org/protobuf/encoding/protojson"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -127,6 +128,7 @@ import (
 	reflect "reflect"
 	sync "sync"
 	utf8 "unicode/utf8"
+	unsafe "unsafe"
 )
 
 // `NullValue` is a singleton enumeration to represent the null value for the
@@ -186,18 +188,17 @@ func (NullValue) EnumDescriptor() ([]byte, []int) {
 //
 // The JSON representation for `Struct` is JSON object.
 type Struct struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Unordered map of dynamically typed values.
-	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Fields        map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // NewStruct constructs a Struct from a general-purpose Go map.
 // The map keys must be valid UTF-8.
 // The map values are converted using NewValue.
-func NewStruct(v map[string]interface{}) (*Struct, error) {
+func NewStruct(v map[string]any) (*Struct, error) {
 	x := &Struct{Fields: make(map[string]*Value, len(v))}
 	for k, v := range v {
 		if !utf8.ValidString(k) {
@@ -214,9 +215,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) {
 
 // AsMap converts x to a general-purpose Go map.
 // The map values are converted by calling Value.AsInterface.
-func (x *Struct) AsMap() map[string]interface{} {
+func (x *Struct) AsMap() map[string]any {
 	f := x.GetFields()
-	vs := make(map[string]interface{}, len(f))
+	vs := make(map[string]any, len(f))
 	for k, v := range f {
 		vs[k] = v.AsInterface()
 	}
@@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error {
 
 func (x *Struct) Reset() {
 	*x = Struct{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_struct_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_struct_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Struct) String() string {
@@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {}
 
 func (x *Struct) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_struct_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -277,13 +276,10 @@ func (x *Struct) GetFields() map[string]*Value {
 //
 // The JSON representation for `Value` is JSON value.
 type Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The kind of value.
 	//
-	// Types that are assignable to Kind:
+	// Types that are valid to be assigned to Kind:
 	//
 	//	*Value_NullValue
 	//	*Value_NumberValue
@@ -291,28 +287,31 @@ type Value struct {
 	//	*Value_BoolValue
 	//	*Value_StructValue
 	//	*Value_ListValue
-	Kind isValue_Kind `protobuf_oneof:"kind"`
+	Kind          isValue_Kind `protobuf_oneof:"kind"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // NewValue constructs a Value from a general-purpose Go interface.
 //
-//	╔════════════════════════╤════════════════════════════════════════════╗
-//	║ Go type                │ Conversion                                 ║
-//	╠════════════════════════╪════════════════════════════════════════════╣
-//	║ nil                    │ stored as NullValue                        ║
-//	║ bool                   │ stored as BoolValue                        ║
-//	║ int, int32, int64      │ stored as NumberValue                      ║
-//	║ uint, uint32, uint64   │ stored as NumberValue                      ║
-//	║ float32, float64       │ stored as NumberValue                      ║
-//	║ string                 │ stored as StringValue; must be valid UTF-8 ║
-//	║ []byte                 │ stored as StringValue; base64-encoded      ║
-//	║ map[string]interface{} │ stored as StructValue                      ║
-//	║ []interface{}          │ stored as ListValue                        ║
-//	╚════════════════════════╧════════════════════════════════════════════╝
+//	╔═══════════════════════════════════════╤════════════════════════════════════════════╗
+//	║ Go type                               │ Conversion                                 ║
+//	╠═══════════════════════════════════════╪════════════════════════════════════════════╣
+//	║ nil                                   │ stored as NullValue                        ║
+//	║ bool                                  │ stored as BoolValue                        ║
+//	║ int, int8, int16, int32, int64        │ stored as NumberValue                      ║
+//	║ uint, uint8, uint16, uint32, uint64   │ stored as NumberValue                      ║
+//	║ float32, float64                      │ stored as NumberValue                      ║
+//	║ json.Number                           │ stored as NumberValue                      ║
+//	║ string                                │ stored as StringValue; must be valid UTF-8 ║
+//	║ []byte                                │ stored as StringValue; base64-encoded      ║
+//	║ map[string]any                        │ stored as StructValue                      ║
+//	║ []any                                 │ stored as ListValue                        ║
+//	╚═══════════════════════════════════════╧════════════════════════════════════════════╝
 //
 // When converting an int64 or uint64 to a NumberValue, numeric precision loss
 // is possible since they are stored as a float64.
-func NewValue(v interface{}) (*Value, error) {
+func NewValue(v any) (*Value, error) {
 	switch v := v.(type) {
 	case nil:
 		return NewNullValue(), nil
@@ -320,12 +319,20 @@ func NewValue(v interface{}) (*Value, error) {
 		return NewBoolValue(v), nil
 	case int:
 		return NewNumberValue(float64(v)), nil
+	case int8:
+		return NewNumberValue(float64(v)), nil
+	case int16:
+		return NewNumberValue(float64(v)), nil
 	case int32:
 		return NewNumberValue(float64(v)), nil
 	case int64:
 		return NewNumberValue(float64(v)), nil
 	case uint:
 		return NewNumberValue(float64(v)), nil
+	case uint8:
+		return NewNumberValue(float64(v)), nil
+	case uint16:
+		return NewNumberValue(float64(v)), nil
 	case uint32:
 		return NewNumberValue(float64(v)), nil
 	case uint64:
@@ -334,6 +341,12 @@ func NewValue(v interface{}) (*Value, error) {
 		return NewNumberValue(float64(v)), nil
 	case float64:
 		return NewNumberValue(float64(v)), nil
+	case json.Number:
+		n, err := v.Float64()
+		if err != nil {
+			return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err)
+		}
+		return NewNumberValue(n), nil
 	case string:
 		if !utf8.ValidString(v) {
 			return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
@@ -342,13 +355,13 @@ func NewValue(v interface{}) (*Value, error) {
 	case []byte:
 		s := base64.StdEncoding.EncodeToString(v)
 		return NewStringValue(s), nil
-	case map[string]interface{}:
+	case map[string]any:
 		v2, err := NewStruct(v)
 		if err != nil {
 			return nil, err
 		}
 		return NewStructValue(v2), nil
-	case []interface{}:
+	case []any:
 		v2, err := NewList(v)
 		if err != nil {
 			return nil, err
@@ -396,7 +409,7 @@ func NewListValue(v *ListValue) *Value {
 //
 // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are
 // converted as strings to remain compatible with MarshalJSON.
-func (x *Value) AsInterface() interface{} {
+func (x *Value) AsInterface() any {
 	switch v := x.GetKind().(type) {
 	case *Value_NumberValue:
 		if v != nil {
@@ -441,11 +454,9 @@ func (x *Value) UnmarshalJSON(b []byte) error {
 
 func (x *Value) Reset() {
 	*x = Value{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_struct_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_struct_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Value) String() string {
@@ -456,7 +467,7 @@ func (*Value) ProtoMessage() {}
 
 func (x *Value) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_struct_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -471,51 +482,63 @@ func (*Value) Descriptor() ([]byte, []int) {
 	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
 }
 
-func (m *Value) GetKind() isValue_Kind {
-	if m != nil {
-		return m.Kind
+func (x *Value) GetKind() isValue_Kind {
+	if x != nil {
+		return x.Kind
 	}
 	return nil
 }
 
 func (x *Value) GetNullValue() NullValue {
-	if x, ok := x.GetKind().(*Value_NullValue); ok {
-		return x.NullValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_NullValue); ok {
+			return x.NullValue
+		}
 	}
 	return NullValue_NULL_VALUE
 }
 
 func (x *Value) GetNumberValue() float64 {
-	if x, ok := x.GetKind().(*Value_NumberValue); ok {
-		return x.NumberValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_NumberValue); ok {
+			return x.NumberValue
+		}
 	}
 	return 0
 }
 
 func (x *Value) GetStringValue() string {
-	if x, ok := x.GetKind().(*Value_StringValue); ok {
-		return x.StringValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_StringValue); ok {
+			return x.StringValue
+		}
 	}
 	return ""
 }
 
 func (x *Value) GetBoolValue() bool {
-	if x, ok := x.GetKind().(*Value_BoolValue); ok {
-		return x.BoolValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_BoolValue); ok {
+			return x.BoolValue
+		}
 	}
 	return false
 }
 
 func (x *Value) GetStructValue() *Struct {
-	if x, ok := x.GetKind().(*Value_StructValue); ok {
-		return x.StructValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_StructValue); ok {
+			return x.StructValue
+		}
 	}
 	return nil
 }
 
 func (x *Value) GetListValue() *ListValue {
-	if x, ok := x.GetKind().(*Value_ListValue); ok {
-		return x.ListValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_ListValue); ok {
+			return x.ListValue
+		}
 	}
 	return nil
 }
@@ -570,17 +593,16 @@ func (*Value_ListValue) isValue_Kind() {}
 //
 // The JSON representation for `ListValue` is JSON array.
 type ListValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Repeated field of dynamically typed values.
-	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	Values        []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // NewList constructs a ListValue from a general-purpose Go slice.
 // The slice elements are converted using NewValue.
-func NewList(v []interface{}) (*ListValue, error) {
+func NewList(v []any) (*ListValue, error) {
 	x := &ListValue{Values: make([]*Value, len(v))}
 	for i, v := range v {
 		var err error
@@ -594,9 +616,9 @@ func NewList(v []interface{}) (*ListValue, error) {
 
 // AsSlice converts x to a general-purpose Go slice.
 // The slice elements are converted by calling Value.AsInterface.
-func (x *ListValue) AsSlice() []interface{} {
+func (x *ListValue) AsSlice() []any {
 	vals := x.GetValues()
-	vs := make([]interface{}, len(vals))
+	vs := make([]any, len(vals))
 	for i, v := range vals {
 		vs[i] = v.AsInterface()
 	}
@@ -613,11 +635,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error {
 
 func (x *ListValue) Reset() {
 	*x = ListValue{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_struct_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_struct_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ListValue) String() string {
@@ -628,7 +648,7 @@ func (*ListValue) ProtoMessage() {}
 
 func (x *ListValue) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_struct_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -652,7 +672,7 @@ func (x *ListValue) GetValues() []*Value {
 
 var File_google_protobuf_struct_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_struct_proto_rawDesc = []byte{
+var file_google_protobuf_struct_proto_rawDesc = string([]byte{
 	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
@@ -700,23 +720,23 @@ var file_google_protobuf_struct_proto_rawDesc = []byte{
 	0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
 	0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
 	0x6f, 0x33,
-}
+})
 
 var (
 	file_google_protobuf_struct_proto_rawDescOnce sync.Once
-	file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
+	file_google_protobuf_struct_proto_rawDescData []byte
 )
 
 func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
 	file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
+		file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)))
 	})
 	return file_google_protobuf_struct_proto_rawDescData
 }
 
 var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
 var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_google_protobuf_struct_proto_goTypes = []interface{}{
+var file_google_protobuf_struct_proto_goTypes = []any{
 	(NullValue)(0),    // 0: google.protobuf.NullValue
 	(*Struct)(nil),    // 1: google.protobuf.Struct
 	(*Value)(nil),     // 2: google.protobuf.Value
@@ -742,45 +762,7 @@ func file_google_protobuf_struct_proto_init() {
 	if File_google_protobuf_struct_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Struct); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Value); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ListValue); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
-	file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
+	file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
 		(*Value_NullValue)(nil),
 		(*Value_NumberValue)(nil),
 		(*Value_StringValue)(nil),
@@ -792,7 +774,7 @@ func file_google_protobuf_struct_proto_init() {
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)),
 			NumEnums:      1,
 			NumMessages:   4,
 			NumExtensions: 0,
@@ -804,7 +786,6 @@ func file_google_protobuf_struct_proto_init() {
 		MessageInfos:      file_google_protobuf_struct_proto_msgTypes,
 	}.Build()
 	File_google_protobuf_struct_proto = out.File
-	file_google_protobuf_struct_proto_rawDesc = nil
 	file_google_protobuf_struct_proto_goTypes = nil
 	file_google_protobuf_struct_proto_depIdxs = nil
 }
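
The regenerated struct.pb.go above extends NewValue to cover int8/int16, uint8/uint16 and json.Number inputs. A minimal usage sketch of the new json.Number path (illustrative only; it assumes the standard google.golang.org/protobuf/types/known/structpb import path and is not part of the patch):

	package main

	import (
		"encoding/json"
		"fmt"

		"google.golang.org/protobuf/types/known/structpb"
	)

	func main() {
		// json.Number values are accepted by NewValue and stored as a
		// NumberValue (float64), so very large integers can lose precision,
		// as the doc comment above notes.
		v, err := structpb.NewValue(map[string]any{
			"count": json.Number("42"),
			"tags":  []any{"a", "b"},
		})
		if err != nil {
			panic(err)
		}
		fmt.Println(v.GetStructValue().GetFields()["count"].GetNumberValue()) // 42
	}
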
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 81511a33..00ac835c 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -78,6 +78,7 @@ import (
 	reflect "reflect"
 	sync "sync"
 	time "time"
+	unsafe "unsafe"
 )
 
 // A Timestamp represents a point in time independent of any time zone or local
@@ -170,10 +171,7 @@ import (
 // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
 // ) to obtain a formatter capable of generating timestamps in this format.
 type Timestamp struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Represents seconds of UTC time since Unix epoch
 	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
 	// 9999-12-31T23:59:59Z inclusive.
@@ -182,7 +180,9 @@ type Timestamp struct {
 	// second values with fractions must still have non-negative nanos values
 	// that count forward in time. Must be from 0 to 999,999,999
 	// inclusive.
-	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Now constructs a new Timestamp from the current time.
@@ -254,11 +254,9 @@ func (x *Timestamp) check() uint {
 
 func (x *Timestamp) Reset() {
 	*x = Timestamp{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Timestamp) String() string {
@@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {}
 
 func (x *Timestamp) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -300,7 +298,7 @@ func (x *Timestamp) GetNanos() int32 {
 
 var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_timestamp_proto_rawDesc = []byte{
+var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{
 	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
 	0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
@@ -317,22 +315,22 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{
 	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
 	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
 	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
 
 var (
 	file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
-	file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
+	file_google_protobuf_timestamp_proto_rawDescData []byte
 )
 
 func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
 	file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
+		file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)))
 	})
 	return file_google_protobuf_timestamp_proto_rawDescData
 }
 
 var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
+var file_google_protobuf_timestamp_proto_goTypes = []any{
 	(*Timestamp)(nil), // 0: google.protobuf.Timestamp
 }
 var file_google_protobuf_timestamp_proto_depIdxs = []int32{
@@ -348,25 +346,11 @@ func file_google_protobuf_timestamp_proto_init() {
 	if File_google_protobuf_timestamp_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Timestamp); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)),
 			NumEnums:      0,
 			NumMessages:   1,
 			NumExtensions: 0,
@@ -377,7 +361,6 @@ func file_google_protobuf_timestamp_proto_init() {
 		MessageInfos:      file_google_protobuf_timestamp_proto_msgTypes,
 	}.Build()
 	File_google_protobuf_timestamp_proto = out.File
-	file_google_protobuf_timestamp_proto_rawDesc = nil
 	file_google_protobuf_timestamp_proto_goTypes = nil
 	file_google_protobuf_timestamp_proto_depIdxs = nil
 }
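
The timestamp.pb.go hunks are plain regeneration output (struct field reordering plus removal of the UnsafeEnabled fallback); the exported API is unchanged. A small sketch, assuming the usual timestamppb constructors and accessors:

	package main

	import (
		"fmt"
		"time"

		"google.golang.org/protobuf/types/known/timestamppb"
	)

	func main() {
		// New/AsTime round-trip; Seconds and Nanos keep the documented ranges.
		ts := timestamppb.New(time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC))
		fmt.Println(ts.GetSeconds(), ts.GetNanos())
		fmt.Println(ts.AsTime().UTC())
	}
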
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index 762a8713..5de53010 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -48,18 +48,18 @@ import (
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	reflect "reflect"
 	sync "sync"
+	unsafe "unsafe"
 )
 
 // Wrapper message for `double`.
 //
 // The JSON representation for `DoubleValue` is JSON number.
 type DoubleValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The double value.
-	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Double stores v in a new DoubleValue and returns a pointer to it.
@@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue {
 
 func (x *DoubleValue) Reset() {
 	*x = DoubleValue{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *DoubleValue) String() string {
@@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {}
 
 func (x *DoubleValue) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -110,12 +108,11 @@ func (x *DoubleValue) GetValue() float64 {
 //
 // The JSON representation for `FloatValue` is JSON number.
 type FloatValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The float value.
-	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Float stores v in a new FloatValue and returns a pointer to it.
@@ -125,11 +122,9 @@ func Float(v float32) *FloatValue {
 
 func (x *FloatValue) Reset() {
 	*x = FloatValue{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FloatValue) String() string {
@@ -140,7 +135,7 @@ func (*FloatValue) ProtoMessage() {}
 
 func (x *FloatValue) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -166,12 +161,11 @@ func (x *FloatValue) GetValue() float32 {
 //
 // The JSON representation for `Int64Value` is JSON string.
 type Int64Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int64 value.
-	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Int64 stores v in a new Int64Value and returns a pointer to it.
@@ -181,11 +175,9 @@ func Int64(v int64) *Int64Value {
 
 func (x *Int64Value) Reset() {
 	*x = Int64Value{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Int64Value) String() string {
@@ -196,7 +188,7 @@ func (*Int64Value) ProtoMessage() {}
 
 func (x *Int64Value) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -222,12 +214,11 @@ func (x *Int64Value) GetValue() int64 {
 //
 // The JSON representation for `UInt64Value` is JSON string.
 type UInt64Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint64 value.
-	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // UInt64 stores v in a new UInt64Value and returns a pointer to it.
@@ -237,11 +228,9 @@ func UInt64(v uint64) *UInt64Value {
 
 func (x *UInt64Value) Reset() {
 	*x = UInt64Value{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *UInt64Value) String() string {
@@ -252,7 +241,7 @@ func (*UInt64Value) ProtoMessage() {}
 
 func (x *UInt64Value) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -278,12 +267,11 @@ func (x *UInt64Value) GetValue() uint64 {
 //
 // The JSON representation for `Int32Value` is JSON number.
 type Int32Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int32 value.
-	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Int32 stores v in a new Int32Value and returns a pointer to it.
@@ -293,11 +281,9 @@ func Int32(v int32) *Int32Value {
 
 func (x *Int32Value) Reset() {
 	*x = Int32Value{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Int32Value) String() string {
@@ -308,7 +294,7 @@ func (*Int32Value) ProtoMessage() {}
 
 func (x *Int32Value) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -334,12 +320,11 @@ func (x *Int32Value) GetValue() int32 {
 //
 // The JSON representation for `UInt32Value` is JSON number.
 type UInt32Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint32 value.
-	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // UInt32 stores v in a new UInt32Value and returns a pointer to it.
@@ -349,11 +334,9 @@ func UInt32(v uint32) *UInt32Value {
 
 func (x *UInt32Value) Reset() {
 	*x = UInt32Value{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *UInt32Value) String() string {
@@ -364,7 +347,7 @@ func (*UInt32Value) ProtoMessage() {}
 
 func (x *UInt32Value) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -390,12 +373,11 @@ func (x *UInt32Value) GetValue() uint32 {
 //
 // The JSON representation for `BoolValue` is JSON `true` and `false`.
 type BoolValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bool value.
-	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Bool stores v in a new BoolValue and returns a pointer to it.
@@ -405,11 +387,9 @@ func Bool(v bool) *BoolValue {
 
 func (x *BoolValue) Reset() {
 	*x = BoolValue{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *BoolValue) String() string {
@@ -420,7 +400,7 @@ func (*BoolValue) ProtoMessage() {}
 
 func (x *BoolValue) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -446,12 +426,11 @@ func (x *BoolValue) GetValue() bool {
 //
 // The JSON representation for `StringValue` is JSON string.
 type StringValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The string value.
-	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // String stores v in a new StringValue and returns a pointer to it.
@@ -461,11 +440,9 @@ func String(v string) *StringValue {
 
 func (x *StringValue) Reset() {
 	*x = StringValue{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *StringValue) String() string {
@@ -476,7 +453,7 @@ func (*StringValue) ProtoMessage() {}
 
 func (x *StringValue) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -502,12 +479,11 @@ func (x *StringValue) GetValue() string {
 //
 // The JSON representation for `BytesValue` is JSON string.
 type BytesValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bytes value.
-	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Bytes stores v in a new BytesValue and returns a pointer to it.
@@ -517,11 +493,9 @@ func Bytes(v []byte) *BytesValue {
 
 func (x *BytesValue) Reset() {
 	*x = BytesValue{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *BytesValue) String() string {
@@ -532,7 +506,7 @@ func (*BytesValue) ProtoMessage() {}
 
 func (x *BytesValue) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -556,7 +530,7 @@ func (x *BytesValue) GetValue() []byte {
 
 var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_wrappers_proto_rawDesc = []byte{
+var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{
 	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
 	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
@@ -590,22 +564,22 @@ var file_google_protobuf_wrappers_proto_rawDesc = []byte{
 	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
 	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
 
 var (
 	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
-	file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+	file_google_protobuf_wrappers_proto_rawDescData []byte
 )
 
 func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
 	file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)))
 	})
 	return file_google_protobuf_wrappers_proto_rawDescData
 }
 
 var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_google_protobuf_wrappers_proto_goTypes = []interface{}{
+var file_google_protobuf_wrappers_proto_goTypes = []any{
 	(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
 	(*FloatValue)(nil),  // 1: google.protobuf.FloatValue
 	(*Int64Value)(nil),  // 2: google.protobuf.Int64Value
@@ -629,121 +603,11 @@ func file_google_protobuf_wrappers_proto_init() {
 	if File_google_protobuf_wrappers_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*DoubleValue); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*FloatValue); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Int64Value); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*UInt64Value); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Int32Value); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*UInt32Value); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*BoolValue); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*StringValue); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*BytesValue); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)),
 			NumEnums:      0,
 			NumMessages:   9,
 			NumExtensions: 0,
@@ -754,7 +618,6 @@ func file_google_protobuf_wrappers_proto_init() {
 		MessageInfos:      file_google_protobuf_wrappers_proto_msgTypes,
 	}.Build()
 	File_google_protobuf_wrappers_proto = out.File
-	file_google_protobuf_wrappers_proto_rawDesc = nil
 	file_google_protobuf_wrappers_proto_goTypes = nil
 	file_google_protobuf_wrappers_proto_depIdxs = nil
 }
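
Likewise for wrappers.pb.go: only the generated internals change. A brief sketch of the wrapper constructors and GetValue accessors referenced above (illustrative usage, not part of the patch):

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		// Constructors return pointers to wrapper messages; the generated
		// GetValue accessors return the zero value for a nil receiver.
		i := wrapperspb.Int64(42)
		s := wrapperspb.String("cloudflared")
		b := wrapperspb.Bool(true)
		fmt.Println(i.GetValue(), s.GetValue(), b.GetValue()) // 42 cloudflared true
	}
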
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6ae88d40..16fd6984 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -89,22 +89,22 @@ github.com/go-chi/chi/v5
 # github.com/go-chi/cors v1.2.1
 ## explicit; go 1.14
 github.com/go-chi/cors
-# github.com/go-jose/go-jose/v4 v4.0.1
-## explicit; go 1.21
+# github.com/go-jose/go-jose/v4 v4.1.0
+## explicit; go 1.24
 github.com/go-jose/go-jose/v4
 github.com/go-jose/go-jose/v4/cipher
 github.com/go-jose/go-jose/v4/json
 github.com/go-jose/go-jose/v4/jwt
-# github.com/go-logr/logr v1.4.1
+# github.com/go-logr/logr v1.4.2
 ## explicit; go 1.18
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
 # github.com/go-logr/stdr v1.2.2
 ## explicit; go 1.16
 github.com/go-logr/stdr
-# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
-## explicit; go 1.13
-github.com/go-task/slim-sprig
+# github.com/go-task/slim-sprig/v3 v3.0.0
+## explicit; go 1.20
+github.com/go-task/slim-sprig/v3
 # github.com/gobwas/httphead v0.1.0
 ## explicit; go 1.15
 github.com/gobwas/httphead
@@ -125,8 +125,8 @@ github.com/golang/protobuf/proto
 ## explicit; go 1.12
 github.com/google/gopacket
 github.com/google/gopacket/layers
-# github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b
-## explicit; go 1.19
+# github.com/google/pprof v0.0.0-20250418163039-24c5476c6587
+## explicit; go 1.23
 github.com/google/pprof/profile
 # github.com/google/uuid v1.6.0
 ## explicit
@@ -148,8 +148,6 @@ github.com/json-iterator/go
 # github.com/klauspost/compress v1.15.11
 ## explicit; go 1.17
 github.com/klauspost/compress/flate
-# github.com/kr/text v0.2.0
-## explicit
 # github.com/kylelemons/godebug v1.1.0
 ## explicit; go 1.11
 # github.com/mattn/go-colorable v0.1.13
@@ -173,8 +171,8 @@ github.com/modern-go/concurrent
 # github.com/modern-go/reflect2 v1.0.2
 ## explicit; go 1.12
 github.com/modern-go/reflect2
-# github.com/onsi/ginkgo/v2 v2.13.0
-## explicit; go 1.18
+# github.com/onsi/ginkgo/v2 v2.23.4
+## explicit; go 1.23.0
 github.com/onsi/ginkgo/v2/config
 github.com/onsi/ginkgo/v2/formatter
 github.com/onsi/ginkgo/v2/ginkgo
@@ -221,17 +219,15 @@ github.com/prometheus/common/model
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/quic-go/quic-go v0.45.0 => github.com/chungthuang/quic-go v0.45.1-0.20250128102735-2687bd175910
-## explicit; go 1.21
+# github.com/quic-go/quic-go v0.51.0 => github.com/chungthuang/quic-go v0.45.1-0.20250428085412-43229ad201fd
+## explicit; go 1.23
 github.com/quic-go/quic-go
 github.com/quic-go/quic-go/internal/ackhandler
 github.com/quic-go/quic-go/internal/congestion
 github.com/quic-go/quic-go/internal/flowcontrol
 github.com/quic-go/quic-go/internal/handshake
-github.com/quic-go/quic-go/internal/logutils
 github.com/quic-go/quic-go/internal/protocol
 github.com/quic-go/quic-go/internal/qerr
-github.com/quic-go/quic-go/internal/qtls
 github.com/quic-go/quic-go/internal/utils
 github.com/quic-go/quic-go/internal/utils/linkedlist
 github.com/quic-go/quic-go/internal/utils/ringbuffer
@@ -247,9 +243,10 @@ github.com/rs/zerolog/log
 # github.com/russross/blackfriday/v2 v2.1.0
 ## explicit
 github.com/russross/blackfriday/v2
-# github.com/stretchr/testify v1.9.0
+# github.com/stretchr/testify v1.10.0
 ## explicit; go 1.17
 github.com/stretchr/testify/assert
+github.com/stretchr/testify/assert/yaml
 github.com/stretchr/testify/require
 # github.com/urfave/cli/v2 v2.3.0 => github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d
 ## explicit; go 1.11
@@ -299,18 +296,19 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1
 go.opentelemetry.io/proto/otlp/common/v1
 go.opentelemetry.io/proto/otlp/resource/v1
 go.opentelemetry.io/proto/otlp/trace/v1
-# go.uber.org/automaxprocs v1.4.0
-## explicit; go 1.13
+# go.uber.org/automaxprocs v1.6.0
+## explicit; go 1.20
+go.uber.org/automaxprocs
 go.uber.org/automaxprocs/internal/cgroups
 go.uber.org/automaxprocs/internal/runtime
 go.uber.org/automaxprocs/maxprocs
-# go.uber.org/mock v0.5.0
+# go.uber.org/mock v0.5.1
 ## explicit; go 1.22
 go.uber.org/mock/gomock
 go.uber.org/mock/mockgen
 go.uber.org/mock/mockgen/model
-# golang.org/x/crypto v0.31.0
-## explicit; go 1.20
+# golang.org/x/crypto v0.37.0
+## explicit; go 1.23.0
 golang.org/x/crypto/blake2b
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/chacha20
@@ -321,21 +319,17 @@ golang.org/x/crypto/internal/alias
 golang.org/x/crypto/internal/poly1305
 golang.org/x/crypto/nacl/box
 golang.org/x/crypto/nacl/secretbox
-golang.org/x/crypto/pbkdf2
 golang.org/x/crypto/salsa20/salsa
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
-## explicit; go 1.20
-golang.org/x/exp/rand
-# golang.org/x/mod v0.18.0
-## explicit; go 1.18
+# golang.org/x/mod v0.24.0
+## explicit; go 1.23.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.26.0
-## explicit; go 1.18
+# golang.org/x/net v0.39.0
+## explicit; go 1.23.0
 golang.org/x/net/bpf
 golang.org/x/net/context
 golang.org/x/net/http/httpguts
@@ -343,6 +337,7 @@ golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/icmp
 golang.org/x/net/idna
+golang.org/x/net/internal/httpcommon
 golang.org/x/net/internal/iana
 golang.org/x/net/internal/socket
 golang.org/x/net/internal/socks
@@ -353,15 +348,15 @@ golang.org/x/net/nettest
 golang.org/x/net/proxy
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.18.0
-## explicit; go 1.18
+# golang.org/x/oauth2 v0.29.0
+## explicit; go 1.23.0
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.10.0
-## explicit; go 1.18
+# golang.org/x/sync v0.13.0
+## explicit; go 1.23.0
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.28.0
-## explicit; go 1.18
+# golang.org/x/sys v0.32.0
+## explicit; go 1.23.0
 golang.org/x/sys/cpu
 golang.org/x/sys/execabs
 golang.org/x/sys/plan9
@@ -371,11 +366,11 @@ golang.org/x/sys/windows/registry
 golang.org/x/sys/windows/svc
 golang.org/x/sys/windows/svc/eventlog
 golang.org/x/sys/windows/svc/mgr
-# golang.org/x/term v0.27.0
-## explicit; go 1.18
+# golang.org/x/term v0.31.0
+## explicit; go 1.23.0
 golang.org/x/term
-# golang.org/x/text v0.21.0
-## explicit; go 1.18
+# golang.org/x/text v0.24.0
+## explicit; go 1.23.0
 golang.org/x/text/cases
 golang.org/x/text/internal
 golang.org/x/text/internal/language
@@ -386,16 +381,18 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.22.0
-## explicit; go 1.19
+# golang.org/x/tools v0.32.0
+## explicit; go 1.23.0
+golang.org/x/tools/cover
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/go/gcexportdata
-golang.org/x/tools/go/internal/packagesdriver
 golang.org/x/tools/go/packages
 golang.org/x/tools/go/types/objectpath
+golang.org/x/tools/go/types/typeutil
 golang.org/x/tools/imports
 golang.org/x/tools/internal/aliases
+golang.org/x/tools/internal/astutil/edge
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
 golang.org/x/tools/internal/event/keys
@@ -404,21 +401,13 @@ golang.org/x/tools/internal/gcimporter
 golang.org/x/tools/internal/gocommand
 golang.org/x/tools/internal/gopathwalk
 golang.org/x/tools/internal/imports
+golang.org/x/tools/internal/modindex
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/stdlib
-golang.org/x/tools/internal/tokeninternal
+golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
-# google.golang.org/appengine v1.6.8
-## explicit; go 1.11
-google.golang.org/appengine/internal
-google.golang.org/appengine/internal/base
-google.golang.org/appengine/internal/datastore
-google.golang.org/appengine/internal/log
-google.golang.org/appengine/internal/remote_api
-google.golang.org/appengine/internal/urlfetch
-google.golang.org/appengine/urlfetch
 # google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api/httpbody
@@ -479,8 +468,8 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.34.1
-## explicit; go 1.17
+# google.golang.org/protobuf v1.36.5
+## explicit; go 1.21
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
@@ -503,6 +492,7 @@ google.golang.org/protobuf/internal/genid
 google.golang.org/protobuf/internal/impl
 google.golang.org/protobuf/internal/order
 google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/protolazy
 google.golang.org/protobuf/internal/set
 google.golang.org/protobuf/internal/strs
 google.golang.org/protobuf/internal/version
@@ -556,4 +546,4 @@ zombiezen.com/go/capnproto2/std/capnp/rpc
 # github.com/urfave/cli/v2 => github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d
 # github.com/prometheus/golang_client => github.com/prometheus/golang_client v1.12.1
 # gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
-# github.com/quic-go/quic-go => github.com/chungthuang/quic-go v0.45.1-0.20250128102735-2687bd175910
+# github.com/quic-go/quic-go => github.com/chungthuang/quic-go v0.45.1-0.20250428085412-43229ad201fd