From 3c078352aa9ec221ef34d834df3f4b1b1481707b Mon Sep 17 00:00:00 2001
From: Knative Automation <automation@knative.team>
Date: Mon, 16 Dec 2024 01:40:57 +0000
Subject: [PATCH] upgrade to latest dependencies

bumping golang.org/x/tools 2ab3b51...4d2b19f:
  > 4d2b19f go.mod: update golang.org/x dependencies
  > 6368677 gopls/internal/golang: strength reduce ComputeImportFixEdits
  > 777f155 gopls/internal/golang: show package attributes on hover
  > 8a0e08f gopls/doc: add missing doc link
  > 61415be gopls/internal/cache: guard against malformed paths in port.matches
  > 9a89d3a internal/analysisinternal: avoid sub-token spans in TypeErrorEndPos
  > 1115af6 internal/expect: support named arguments f(a, b, c=d, e="f")
  > 0b9e499 go/{expect,packages/packagestest}: mention the tag+delete proposal
  > efcd2bd internal/packagestest: fork go/packages/packagestest
  > 0e9ed3d go/packages: do not mutate Config
  > ca2b41b x/tools: use internal/expect instead of go/expect
  > b22f1ad internal/expect: fork go/expect
  > f1ae722 gopls/internal/semtok: change types.Named to types.Basic for iota, true, and false
  > dba5486 gopls: update x/telemetry to pick up fix for countertest.ReadCounter
  > 9d40727 gopls/internal/server: don't interact with os.UserConfigDir from tests
  > 59933b6 go/packages: create fewer goroutines
  > f1f7c26 gopls/internal/cache: ensure GO111MODULE is unset in GOPATH tests
  > 4f98d3f gopls/internal/golang: run testcases as subtests
  > 6d27bba gopls/internal/golang: add testcase handling for func with error returns
  > e5417d7 gopls/internal/cache: log go env in TestZeroConfigAlgorithm
  > 691997a gopls/internal/golang: consolidate imports from both files in qualifier
  > 0c792f1 gopls/internal/golang: support generating test for functions
  > 27e1a3a go/packages: ensure TypesInfo is set when NeedTypesInfo is enabled
  > 817c7bc gopls/internal/test/integration/workspace: fix TestStdWorkspace
  > e36459f gopls/internal/golang: generate test name for selected function/method
  > 2998e9a go/analysis/passes/lostcancel: add WithCancelCause et al
  > f0379e0 go/packages: add BenchmarkNetHTTP
  > ad28b93 go/packages: minor cleanups to loader.parseFiles
  > cceaf96 internal/imports: carve out a Source interface for index integration
  > f4878ba gopls/internal/golang: use correct imports in HTML pkg doc links
  > 109c5fc gopls/internal/test: fix path to local go in integration tests
  > 99e8fee x/tools: fset.File(file.Pos()) -> fset.File(file.FileStart)
  > 5cd08e2 go/gcexportdata: document 2 releases + tip policy
  > ce03cd6 internal/modindex: parse changed time in local time zone
  > 386503d gopls/internal/golang: add source code action for add test
  > 9d6e1a6 Revert "gopls/internal/analysis: disable ssa/ir analyzers on range-over-func"
  > f153a42 gopls: update honnef.co/go/tools (staticcheck) to v0.5.1
  > 59f79bc go/ssa: speed up TestTypeparamTest by loading tests once
  > 70e82e0 go/analysis/passes/asmdecl: Correctly identify writeResult instructions
  > cbd92b1 gopls/internal: stubcalledfunction: improve logic of selecting insert position
  > 45a28e1 all: fix x/tools tests that fail with a go1.23.1 go.work file
  > b0f44d5 copyright: limit copyright checking to .go files
  > 73d6794 gopls/internal/template: fix completion token boundary conditions
  > 91421d7 gopls/internal/cache: share type checking with analysis
  > 36684df go/analysis/passes/unusedwrite: silence if unsafe is imported
  > 17213ba gopls/internal/cache/parsego: support lazy object resolution for Files
  > 7d1d070 go/ssa/interp: disable interp tests on wasm
  > ae56d93 internal/modindex: implement Lookups in the index
  > 044b16f internal/gcimporter: extend markBlack workaround go1.22
  > dabba6a internal/modindex: new API for incremental update
  > 7d196fc go/ssa/interp: fix failing recover2.go test
  > f861377 go/ssa/interp: redirect interpreter std{out,err} to testing.T.Log
  > 9f3c646 gopls/internal/cache: memoize cache keys
  > 1f162c6 gopls/internal/cache: async pull diagnostics and joined analysis
  > dbb8252 gopls/release: remove unused(?) script
  > 6176384 go/types/objectpath: break cycles through interface methods
  > 9e6388a internal/gcimporter: work around go/types data race in 1.23.
  > c457787 gopls/internal/cache: avoid reporting bugs when go/packages has errors
  > 401eca0 gopls/internal/settings: remove "allowImplicitNetworkAccess"
  > 6618569 gopls/internal/cache: refine a bug report related to package metadata
  > 6381f0b gopls/internal/cache: refine bug reports
  > 63e4449 gopls/internal/telemetry/cmd/stacks: print double-claimed stacks
  > f003ff6 gopls/internal/test/marker: rename s/suggestedfix/quickfix/
  > 8128bcf gopls/internal/cache: add tolerance for builtin test variants
  > a199121 gopls: allow for asynchronous request handling
  > 8ecf757 internal/gcimporter: remove test of unsupported "goroot" iimport
  > 7310c72 importgraph: correct typo in graph_test.go
  > ce4cb55 internal/modindex: fix two bugs
  > b3482cc internal/modindex/cmd: Command for maintaining module cache indexes
  > 454be60 x/tools: be defensive after types.Info.Types[expr] lookups
  > dec6bf1 internal/modindex: update module cache index
  > 6c6def2 gopls/internal/telemetry/cmd/stacks: fix bad tmpreaper interaction
  > 1a5fe83 gopls: remove cruft
  > 244a31e gopls/internal: CodeAction:  quickfix to generate missing method
  > 87d6131 internal/typeparams: support parameterized aliases in Free
  > aa87dcf go/analysis/passes: execute gofmt
  > feffeaa go/packages: report an error from Load when GOROOT is misconfigured
  > 50179f2 Revert "internal/aliases: add a function to conditionally enable aliases"
  > 4f6e118 all: set gotypesalias=1 when using >=1.23 toolchain
  > 915132c internal/typesinternal: add NamedOrAlias type
  > bd86f8c gopls/internal/cache/analysis: lazily resolve the import map
  > a4e0a9e cmd/bundle: enable materialized aliases
  > db26c69 cmd/stringer: fix test on android
  > f08b5c1 gopls/internal/test/integration/bench: add a pull diagnostics benchmark
  > bbb979f go/callgraph/vta: use node IDs for type flow graph
  > cf8979b gopls/doc/features: add index of supported Code Actions
  > 8621919 go/ssa/ssautil: disable fmt imports on wasm tests
  > f439874 internal/modindex: add symbol information
  > f21a1dc gopls: add initial support for pull diagnostics
  > c19060b gopls/internal/cache: use packageHandles to hold an active package cache
  > a30b207 internal/versions: remove InitFileVersions
  > de11c55 gopls/doc/codelenses: fix link typo
  > 0b989c8 internal/versions: update test expectations
  > 89a5311 go/analysis/passes/asmdecl: allow syscall write registers implicitly
  > f8f3c13 internal/aliases: add a function to conditionally enable aliases
bumping google.golang.org/genproto/googleapis/api ddb44da...8af14fe:
  > 8af14fe chore(all): update all (# 1159)
  > 7e3bb23 chore(all): auto-regenerate .pb.go files (# 1158)
  > f6391c0 chore(all): update all (# 1156)
  > 3fd189d chore(googleapis): update cloud/gkeconnect/gateway/v1beta1/alias.go (# 1157)
  > 4ba0660 chore: update datastore and bigtable (# 1155)
  > fc7c04a feat: move SoT for datastore proto definitions (# 1154)
  > 278611b chore(all): update all (# 1153)
bumping knative.dev/hack 05b2fb3...1978b3a:
  > 1978b3a Update community files (# 404)
  > b799531 Update community files (# 403)
  > ef6e7e9 Export KO_FLAGS for consuming scripts (# 401)
  > 2191456 Update community files (# 400)
bumping golang.org/x/sync 411f99e...151027e:
  > 151027e README: don't recommend go get
bumping golang.org/x/text 3043346...efd25da:
  > efd25da go.mod: update golang.org/x dependencies
  > 8a0e65e README: don't recommend go get
  > fefda1a internal/texttest: remove Run and Bench helpers
  > a457f47 all: normalize subtest names to NFC
bumping google.golang.org/genproto/googleapis/rpc ddb44da...8af14fe:
  > 8af14fe chore(all): update all (# 1159)
  > 7e3bb23 chore(all): auto-regenerate .pb.go files (# 1158)
  > f6391c0 chore(all): update all (# 1156)
  > 3fd189d chore(googleapis): update cloud/gkeconnect/gateway/v1beta1/alias.go (# 1157)
  > 4ba0660 chore: update datastore and bigtable (# 1155)
  > fc7c04a feat: move SoT for datastore proto definitions (# 1154)
  > 278611b chore(all): update all (# 1153)
bumping knative.dev/pkg 9b9d535...f3ab560:
  > f3ab560 Update community files (# 3118)
  > a7fd9b1 Bump google.golang.org/protobuf from 1.35.1 to 1.35.2 (# 3117)
  > 215048a Bump golang.org/x/tools from 0.26.0 to 0.27.0 (# 3114)
  > 47a6f9f Bump google.golang.org/grpc from 1.67.1 to 1.68.0 (# 3116)
  > 7b333a0 Bump golang.org/x/net from 0.30.0 to 0.31.0 (# 3113)
  > 0a99635 Update community files (# 3112)
  > 25f6002 upgrade to latest dependencies (# 3111)
  > 6af2bf9 upgrade to latest dependencies (# 3110)
  > 76cfa12 Fix `WEBHOOK_DISABLE_NAMESPACE_OWNERSHIP` env-var (# 3107)
  > 6d10851 Add `WEBHOOK_DISABLE_NAMESPACE_OWNERSHIP` env-var (# 3103)
  > 6eb75e8 Update community files (# 3105)
bumping golang.org/x/sys 23b0dab...e0753d4:
  > e0753d4 Revert "windows/mkwinsyscall: use syscall.SyscallN instead of syscall.Syscall{6,9,12,15}"
  > c29efe3 windows: add iphlpapi functions for change notifications
  > 8f2aa9f cpu: conditionally re-enable AVX512 support on darwin/amd64
  > 054f1fc README: don't recommend go get
  > ca04041 unix: extend z/OS support
  > 18e038c unix: move NETLINK_* consts to own section
  > d045236 windows: implement Ftruncate using a single syscall on Windows
  > cff53d5 unix: gofmt after CL 610296
  > 123459f unix: update z/OS implementation of fcntl and mmap
  > df4a4da unix/linux: adjust Dockerfile ENV key value format
  > 696d342 unix: gofmt after CL 621375
  > 60d7877 unix: add PTP_PF_* constants on Linux
  > 256d1df unix: add PTP IOCTLs on Linux
  > adbb8bb windows/mkwinsyscall: use syscall.SyscallN instead of syscall.Syscall{6,9,12,15}
  > a57fdb8 unix: add IoctlGetHwTstamp/IoctlGetHwTstamp on Linux
  > 3932916 unix: switch to Ubuntu 24.10 in Dockerfile
  > 2dfefb6 all: execute gofmt
  > e5eee7e unix: add IoctlGetEthtoolTsInfo on Linux
  > fe162ba unix: add ClockSettime on Linux
  > 7143f4a windows: manually initialize NewNTUnicodeString
bumping google.golang.org/grpc 3f95b38...acba4d3:
  > acba4d3 Change version to 1.68.0 (# 7743)
  > 5363dca credentials: Apply defaults to TLS configs provided through GetConfigForClient (# 7754) (# 7813)
  > 056dc64 status: Fix status incompatibility introduced by # 6919 and move non-regeneratable proto code into /testdata (# 7724) (# 7793)
  > b79fb61 mem: use slice capacity instead of length, to determine whether to pool buffers or directly allocate them (# 7702) (# 7792)
  > 54841ef  stats/opentelemetry/csm: Get mesh_id local label from "CSM_MESH_ID" environment variable, rather than parsing from bootstrap file (# 7740)
  > ad81c20 pickfirstleaf: minor simplification to reconcileSubConnsLocked method (# 7731)
  > b850ea5 transport: wait for goroutines to exit before transport closes (# 7666)
  > 00b9e14 pickfirst: New pick first policy for dualstack (# 7498)
  > 18a4eac testutils: add couple of log statements to the restartable listener type (# 7716)
  > fdc2ec2 xdsclient: deflake TestADS_ResourcesAreRequestedAfterStreamRestart (# 7720)
  > 4115c21 xds: return all ServerConfig dial options together (# 7718)
  > b8ee37d pickfirst: Move var for mocking the shuffle func from internal/internal to pickfirst/internal (# 7698)
  > d9d8f34 revert xds: return all ServerConfig dial options together (# 7712)
  > 5f178a8 xdsclient: fix test build breakage (# 7710)
  > f17ea7d xds: return all ServerConfig dial options together (# 7680)
  > bdd444d xds: address merge conflict gotcha and missed review comment from previous PRs (# 7705)
  > d365be6 transport: prevent deadlock in transport Close when GoAway write hangs (# 7662)
  > 6c6c9b6 xdsclient: e2e style tests for ads stream restart (5/N) (# 7696)
  > 5e6f4b9 xds: misc test cleanup (4/N) (# 7695)
  > 3adcd41 xdsclient: make load reporting tests e2e style (3/N) (# 7694)
  > 98d1550 xdsclient: switch more transport tests to e2e style (2/N) (# 7693)
  > 9afb232 xdsclient: invoke watch callback when new update matches cached one, but previous update was NACKed (1/N) (# 7692)
  > ab5af45 Revert "protoc-gen-go-grpc: remove `use_generic_streams_experimental` flag (defaults to true) (# 7654)" (# 7703)
  > e8a70c6 vet: add check to ensure terminating newline (# 7645)
  > 5fd9853 examples: improve package comments (# 7658)
  > 859602c vet: add check for tabs in text files (# 7678)
  > 67e47fc xds: Fix flaky test TestUnmarshalListener_WithUpdateValidatorFunc (# 7675)
  > ca4865d balancer: automatically stop producers on subchannel state changes (# 7663)
  > 941102b xds/server: Fix xDS Server leak (# 7664)
  > 7aee163 xds: add xDS transport custom Dialer support (# 7586)
  > 9affdbb internal/credentials/xds: add unit tests for `HandshakeInfo.Equal` (# 7638)
  > 3196f7a protoc-gen-go-grpc: remove `use_generic_streams_experimental` flag (defaults to true) (# 7654)
  > 218811e balancer/rls: Add picker and cache unit tests for RLS Metrics (# 7614)
  > a9ff62d clusterresolver/e2e_test: Avoid making real DNS requests in TestAggregateCluster_BadEDS_BadDNS (# 7669)
  > e7a8097 cleanup: replace grpc.Dial with grpc.NewClient in tests (# 7640)
  > bcf9171 transport: Fix reporting of bytes read while reading headers (# 7660)
  > 8ea3460 balancer: fix logic to prevent producer streams before READY is reported (# 7651)
  > 6c48e47 replace tab with spaces in text files (# 7650)
  > 1418e5e clusterimpl: use gsb.UpdateClientConnState instead of switchTo, on receipt of config update (# 7567)
  > ac41314 .*: Add missing a newline at the end (# 7644)
  > 11c44fb vet: add comment explaining reason for revive linter disabled rules (# 7634)
  > 3b626a7 *: fix more typos (# 7619)
  > 04e78b0 .*: fix lint issues of not having comments for exported funcs and vars along with any remaining issues and enable remaining disabled rules (# 7575)
  > 31ffeee Deps: Bump Go version in Dockerfiles and test/kokoro/xds.sh (# 7629)
  > 393fbc3 Update dependencies after 1.67 branch cut (# 7624)
  > cf5d541 stubserver: support xds-enabled grpc server (# 7613)
  > b6fde8c vet: add check for trailing spaces (# 7576)
  > 7fb7ac7 mem: replace flate.Reader reference (# 7595)
  > 8f920c6 Change version to 1.68.0-dev (# 7601)
bumping golang.org/x/mod 46a3137...dec0365:
  > dec0365 sumdb: make data tiles by Server compatible with sum.golang.org
  > c8a7319 x/mod: fix handling of vendored packages with '/vendor' in non-top-level paths
  > 9cd0e4c x/mod: remove vendor/modules.txt from module download
bumping golang.org/x/net 6cc5ac4...334afa0:
  > 334afa0 go.mod: update golang.org/x dependencies
  > d7f220d quic: add LocalAddr and RemoteAddr to quic.Conn
  > 858db1a http2: surface errors occurring very early in a client conn's lifetime
  > 0aa844c http2: support unencrypted HTTP/2 handoff from net/http
  > f35fec9 http2: detect hung client connections by confirming stream resets
  > e883dae README: don't recommend go get
  > 511cc3a html: add Node.{Ancestors,ChildNodes,Descendants}()
  > 4783315 http2: limit 1xx based on size, do not limit when delivered
  > 5716b98 internal/socket: execute gofmt
  > 42b1186 http2: support ResponseController.EnableFullDuplex
bumping knative.dev/networking 60e29ff...929a5d5:
  > 929a5d5 upgrade to latest dependencies (# 1022)
  > 5272a36 upgrade to latest dependencies (# 1021)
  > 4c9d5b8 upgrade to latest dependencies (# 1020)
  > 8a88798 Update community files (# 1019)
  > 7341b46 upgrade to latest dependencies (# 1018)
  > d5387fa upgrade to latest dependencies (# 1017)
  > c254dbf upgrade to latest dependencies (# 1016)
  > d954288 upgrade to latest dependencies (# 1014)
  > 49760c2 Update community files (# 1013)
bumping knative.dev/eventing 291c0a8...414af5c:
  > 414af5c Add EventPolicy to storage version migrator (# 8384)
  > f82811b Remove conversion webhook config in EventPolicy CRD (# 8379)
  > 7c1a62d Add health check config and clean up duplicated code (# 8308)
  > bff7b03 Remove unused filter function (# 8375)
  > 8d8b6c9 RequestReply: Added feature flag for default timeout (# 8361)
  > 7bb320c Chore: field semantic correction  (# 8370)
  > 3345c86 Clean up remove session token (# 8369)
  > 6e7e3d4 Adding sns sink to Integration Sink (# 8365)
  > d4357e3 MT-Broker: return retriable status code based on the state to leverage retries (# 8366)
  > 795e4a3 Move image to map (# 8362)
  > 408db83 We changed the event type on the images (# 8364)
  > f577063 Align label generation and apply on container-source as well (# 8355)
  > cb31cb2 Add integrationsinks-addressable-resolver cluster role (# 8356)
  > 545c14c Change JobSink execution mode variable to `K_EXECUTION_MODE` (# 8350)
  > 91f1c79 Avoid using twice hardcoded strings for the names of the Kube Secret keys, for AWS access/secret key vals (# 8345)
  > cb29ac1 [main] Update community files (# 8354)
  > aef3a14 [main] Format Go code (# 8349)
  > 65da6fc Update integrationsink crd  (# 8347)
  > 01c8068 JobSink: Inject a `KNATIVE_EXECUTION_MODE` environment variable with value `batch` (# 8346)
  > 366ff26 IntegrationSink: rek-test templating support for bool annotations (# 8342)
  > 7176ce6 Add IntegrationSink CRD  (# 8304)
  > 5ad7dab feat: add RequestReply types and CRD (# 8337)
  > 7f313d7 fix: rename `job-sink` to `job_sink` (# 8335)
  > 4f6535a chore: correct comments (# 8336)
  > ebe99e6 Introducing common integration helper and change package name (# 8327)
  > 201e096 [main] Format Go code (# 8334)
  > 6f5edf5 [main] Upgrade to latest dependencies (# 8328)
  > f21370a Integration Api moved to versioned common (# 8325)
  > 63d4da5 Update Integration API and use custom tags on structs for better readability (# 8321)
  > 7abb04d JobSink: Delete secrets associated with jobs when jobs are deleted (# 8331)
  > 8fed0be Allow configuring (opt-in) IMC async handler  (# 8311)
  > bc6e878 fix: remove duplicated observedGeneration from jobsinks.sinks.knative.dev (# 8326)
  > 6b4ff7f [main] Upgrade to latest dependencies (# 8324)
  > cd31c1b [main] Upgrade to latest dependencies (# 8322)
  > 9d67389 [chore] Do not generate SSL env vars for each struct entry, just once per containersource/image (# 8315)
  > 35d8c63 [chore] Use a generic access/secret key in the secret for AWS connections (# 8312)
  > f53d038 [main] Update community files (# 8307)
  > df34028 Add missing copyright boilerplate (# 8305)
  > 3f2b75b Use GetServiceHostname when passing URL to JobSink (# 8303)
  > fa6b4c5 Add jobsinks-addressable-resolver cluster role (# 8298)
  > b4b609a Add observedGeneration in JobSink OpenAPI schema (# 8297)
  > 2e4d353 Allow imc-controller to list JobSinks (# 8294)
  > 7bca815 IntegrationSource CRD (# 8238)
  > c859efd [main] Format Go code (# 8289)
  > 7c97e6f Scheduler: MAXFILLUP strategy will spread vreplicas across multiple pods (# 8263)
  > ef6b31a fix: et autocreate makes eventtype id required (# 8288)
  > 4faf9c8 [main] Upgrade to latest dependencies (# 8287)
  > 65aeab5 [main] Upgrade to latest dependencies (# 8275)
  > 03ba8f4 feat: autocreate v1beta3 eventtypes (# 8276)
bumping golang.org/x/term 9d5441a...b725e36:
  > b725e36 go.mod: update golang.org/x dependencies
  > 54df7da README: don't recommend go get
bumping golang.org/x/oauth2 6d8340f...3e64809:
  > 3e64809 x/oauth2: add Token.ExpiresIn
  > 16a9973 jwt: rename example to avoid vet error
  > b52af7d endpoints: add GitLab DeviceAuthURL
bumping google.golang.org/protobuf 158d2b3...c72053a:
  > c72053a all: release v1.35.2
  > b985635 internal/impl: fix TestMarshalMessageSetLazyRace (was a no-op!)
  > 76135f9 proto: switch messageset_test to use makeMessages() injection point
  > 29947bb internal/testprotos/test: add nested message field with [lazy=true]
  > 5c14d72 encoding/prototext: use testmessages_test.go approach, too
  > 496557b proto: refactor equal_test from explicit table to use makeMessages()
  > 0517e5a testing/protocmp: document behavior when combining Ignore and Sort
  > d14ebce all: implement strip_enum_prefix editions feature
  > fb995f1 encoding/protojson: allow missing value for Any of type Empty
  > d340238 all: start v1.35.1-devel
bumping knative.dev/serving 6a27004...2d5a1e9:
  > 2d5a1e9 Update net-kourier nightly (# 15647)
  > 5717d19 Update net-kourier nightly (# 15636)
  > 15ac5b1 Update net-contour nightly (# 15634)
  > 8cbbe8f Update net-gateway-api nightly (# 15635)
  > 5911aee Update community files (# 15633)
  > 3e45e8f Update net-kourier nightly (# 15622)
  > 0a61ece Update net-gateway-api nightly (# 15620)
  > 85ce915 upgrade to latest dependencies (# 15623)
  > 9f24a9c Update net-contour nightly (# 15621)
  > bd9050e upgrade to latest dependencies (# 15618)
  > b51a57d upgrade to latest dependencies (# 15613)
  > ebe9d03 Update net-kourier nightly (# 15612)
  > 75a7530 Update net-contour nightly (# 15610)
  > 0b936fa Update net-gateway-api nightly (# 15611)
  > 95d2c9b upgrade to latest dependencies (# 15609)
  > 37aaaae Update net-kourier nightly (# 15605)
  > 25edfee Update net-contour nightly (# 15604)
  > f640bbb Update net-gateway-api nightly (# 15603)
  > 64b8325 Update community files (# 15602)
  > 423e654 Fix configuration metadata inconsistency (# 15601)
  > 135b667 Update net-kourier nightly (# 15598)
  > f215fdb Update net-contour nightly (# 15597)
  > 7dc3e95 Update net-gateway-api nightly (# 15596)
  > 16d2da8 upgrade to latest dependencies (# 15595)
  > 331c097 upgrade to latest dependencies (# 15593)
  > a22343c Update net-gateway-api nightly (# 15590)
  > e8e7de5 Update net-kourier nightly (# 15592)
  > 9d859b5 Update net-contour nightly (# 15589)
  > 83a09f0 Update net-istio nightly (# 15591)
  > d792207 Update net-contour nightly (# 15587)
  > 19b4ce9 Update net-gateway-api nightly (# 15586)
  > 4a9936a Update net-kourier nightly (# 15585)
  > 67460f5 Update net-istio nightly (# 15584)

Signed-off-by: Knative Automation <automation@knative.team>
---
 go.mod                                        |  36 +-
 go.sum                                        |  72 +-
 .../x/net/http2/client_conn_pool.go           |   8 +-
 vendor/golang.org/x/net/http2/server.go       |  34 +-
 vendor/golang.org/x/net/http2/transport.go    | 242 +++++--
 vendor/golang.org/x/net/http2/unencrypted.go  |  32 +
 vendor/golang.org/x/oauth2/token.go           |   7 +
 vendor/golang.org/x/sys/unix/ioctl_linux.go   |  96 +++
 vendor/golang.org/x/sys/unix/mkerrors.sh      |  12 +
 vendor/golang.org/x/sys/unix/syscall_linux.go |   1 +
 .../x/sys/unix/syscall_zos_s390x.go           | 104 ++-
 vendor/golang.org/x/sys/unix/zerrors_linux.go |  22 +
 .../x/sys/unix/zerrors_linux_386.go           |  14 +
 .../x/sys/unix/zerrors_linux_amd64.go         |  14 +
 .../x/sys/unix/zerrors_linux_arm.go           |  14 +
 .../x/sys/unix/zerrors_linux_arm64.go         |  14 +
 .../x/sys/unix/zerrors_linux_loong64.go       |  14 +
 .../x/sys/unix/zerrors_linux_mips.go          |  14 +
 .../x/sys/unix/zerrors_linux_mips64.go        |  14 +
 .../x/sys/unix/zerrors_linux_mips64le.go      |  14 +
 .../x/sys/unix/zerrors_linux_mipsle.go        |  14 +
 .../x/sys/unix/zerrors_linux_ppc.go           |  14 +
 .../x/sys/unix/zerrors_linux_ppc64.go         |  14 +
 .../x/sys/unix/zerrors_linux_ppc64le.go       |  14 +
 .../x/sys/unix/zerrors_linux_riscv64.go       |  14 +
 .../x/sys/unix/zerrors_linux_s390x.go         |  14 +
 .../x/sys/unix/zerrors_linux_sparc64.go       |  14 +
 .../golang.org/x/sys/unix/zsyscall_linux.go   |  10 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go  | 120 +++-
 .../golang.org/x/sys/unix/ztypes_zos_s390x.go |   6 +
 .../x/sys/windows/syscall_windows.go          |  34 +-
 .../golang.org/x/sys/windows/types_windows.go | 126 ++++
 .../x/sys/windows/zsyscall_windows.go         |  53 ++
 vendor/golang.org/x/term/README.md            |  11 +-
 .../x/tools/go/ast/astutil/imports.go         |   5 +
 .../x/tools/go/gcexportdata/gcexportdata.go   |  97 ++-
 .../x/tools/go/packages/external.go           |   6 +-
 .../golang.org/x/tools/go/packages/golist.go  |  38 +-
 .../x/tools/go/packages/packages.go           | 325 +++++----
 .../x/tools/go/types/objectpath/objectpath.go |  99 ++-
 .../x/tools/internal/gcimporter/iexport.go    |  24 +-
 .../x/tools/internal/gcimporter/iimport.go    |  12 +
 .../internal/gcimporter/iimport_go122.go      |  53 ++
 .../x/tools/internal/imports/fix.go           | 233 +++----
 .../x/tools/internal/imports/imports.go       |  31 +-
 .../x/tools/internal/imports/source.go        |  63 ++
 .../x/tools/internal/imports/source_env.go    | 125 ++++
 .../x/tools/internal/typeparams/free.go       |  17 +-
 .../x/tools/internal/typesinternal/types.go   |  56 ++
 .../x/tools/internal/versions/types.go        |   5 -
 vendor/google.golang.org/grpc/CONTRIBUTING.md |  16 +-
 .../grpc/balancer/balancer.go                 |  15 +-
 .../grpc/balancer/base/balancer.go            |   2 +-
 .../balancer/pickfirst/internal/internal.go   |  24 +
 .../grpc/balancer/pickfirst/pickfirst.go      |  14 +-
 .../pickfirst/pickfirstleaf/pickfirstleaf.go  | 625 ++++++++++++++++++
 .../grpc/balancer_wrapper.go                  |  48 +-
 vendor/google.golang.org/grpc/clientconn.go   |  54 +-
 .../google.golang.org/grpc/credentials/tls.go |  29 +-
 vendor/google.golang.org/grpc/dialoptions.go  |   2 +-
 .../balancer/gracefulswitch/config.go         |   2 +
 .../grpc/internal/channelz/channel.go         |  15 +
 .../grpc/internal/channelz/server.go          |   2 +
 .../grpc/internal/channelz/socket.go          |   7 +
 .../grpc/internal/channelz/subchannel.go      |   2 +
 .../grpc/internal/channelz/trace.go           |  19 +-
 .../grpc/internal/envconfig/envconfig.go      |   5 +
 .../internal/grpcsync/callback_serializer.go  |   2 +-
 .../grpc/internal/grpcutil/method.go          |   2 +-
 .../grpc/internal/idle/idle.go                |   4 +-
 .../grpc/internal/internal.go                 |   8 +-
 .../internal/resolver/dns/dns_resolver.go     |   2 +-
 .../internal/stats/metrics_recorder_list.go   |  10 +
 .../grpc/internal/status/status.go            |  35 +-
 .../grpc/internal/transport/http2_client.go   |  60 +-
 .../grpc/internal/transport/transport.go      |   9 +
 vendor/google.golang.org/grpc/mem/buffers.go  |  32 +-
 vendor/google.golang.org/grpc/rpc_util.go     |   3 +-
 vendor/google.golang.org/grpc/version.go      |   2 +-
 .../encoding/protojson/well_known_types.go    |   6 +-
 .../editiondefaults/editions_defaults.binpb   | Bin 93 -> 99 bytes
 .../internal/editionssupport/editions.go      |   5 +
 .../protobuf/internal/filedesc/desc.go        |  15 +
 .../protobuf/internal/filedesc/editions.go    |   4 +
 .../internal/genid/go_features_gen.go         |  17 +
 .../protobuf/internal/version/version.go      |   2 +-
 .../protobuf/reflect/protodesc/desc.go        |  12 +-
 .../protobuf/reflect/protodesc/editions.go    |   5 +
 .../types/gofeaturespb/go_features.pb.go      | 131 +++-
 .../apis/common/integration/v1alpha1/auth.go  |  43 ++
 .../apis/common/integration/v1alpha1/aws.go   |  76 +++
 .../apis/common/integration/v1alpha1/doc.go   |  19 +
 .../v1alpha1/zz_generated.deepcopy.go         | 164 +++++
 .../pkg/apis/eventing/v1alpha1/register.go    |   2 +
 .../v1alpha1/requestreply_conversion.go       |  34 +
 .../v1alpha1/requestreply_defaults.go         |  44 ++
 .../v1alpha1/requestreply_lifecycle.go        | 106 +++
 .../eventing/v1alpha1/requestreply_types.go   | 122 ++++
 .../v1alpha1/requestreply_validation.go       |  83 +++
 .../v1alpha1/zz_generated.deepcopy.go         | 113 ++++
 .../eventing/pkg/apis/feature/features.go     |  38 +-
 .../eventing/pkg/apis/feature/flag_names.go   |  25 +-
 .../messaging/v1/in_memory_channel_types.go   |  11 +-
 .../eventing/pkg/apis/sinks/register.go       |   6 +
 .../v1alpha1/integration_sink_conversion.go   |  36 +
 .../v1alpha1/integration_sink_defaults.go     |  26 +
 .../v1alpha1/integration_sink_lifecycle.go    | 123 ++++
 .../sinks/v1alpha1/integration_sink_types.go  | 118 ++++
 .../v1alpha1/integration_sink_validation.go   |  97 +++
 .../apis/sinks/v1alpha1/job_sink_defaults.go  |  41 ++
 .../pkg/apis/sinks/v1alpha1/job_sink_types.go |  13 +-
 .../pkg/apis/sinks/v1alpha1/register.go       |   2 +
 .../sinks/v1alpha1/zz_generated.deepcopy.go   | 159 +++++
 .../eventing/pkg/apis/sources/register.go     |   6 +
 .../eventing/pkg/apis/sources/v1alpha1/doc.go |  20 +
 .../v1alpha1/integration_conversion.go        |  36 +
 .../sources/v1alpha1/integration_defaults.go  |  26 +
 .../sources/v1alpha1/integration_lifecycle.go |  76 +++
 .../sources/v1alpha1/integration_types.go     | 113 ++++
 .../v1alpha1/integration_validation.go        |  98 +++
 .../pkg/apis/sources/v1alpha1/register.go     |  52 ++
 .../sources/v1alpha1/zz_generated.deepcopy.go | 184 ++++++
 .../clientset/versioned/scheme/register.go    |   2 +
 .../eventing/pkg/eventingtls/eventingtls.go   |   3 +
 vendor/knative.dev/hack/release.sh            |   2 +-
 vendor/modules.txt                            |  48 +-
 126 files changed, 5120 insertions(+), 719 deletions(-)
 create mode 100644 vendor/golang.org/x/net/http2/unencrypted.go
 create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
 create mode 100644 vendor/golang.org/x/tools/internal/imports/source.go
 create mode 100644 vendor/golang.org/x/tools/internal/imports/source_env.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/auth.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/aws.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/doc.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_conversion.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_defaults.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_lifecycle.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_types.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_validation.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_conversion.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_defaults.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_lifecycle.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_types.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_validation.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/doc.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_conversion.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_defaults.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_lifecycle.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_types.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_validation.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/register.go
 create mode 100644 vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go

diff --git a/go.mod b/go.mod
index 5c7a9e0a..a9dc7fa0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,7 +1,7 @@
 // Deprecated: use knative.dev/client/pkg instead.
 module knative.dev/client-pkg
 
-go 1.22.0
+go 1.22.7
 
 require (
 	emperror.dev/errors v0.8.1
@@ -17,7 +17,7 @@ require (
 	github.com/spf13/viper v1.13.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/term v0.25.0
+	golang.org/x/term v0.26.0
 	gotest.tools/v3 v3.3.0
 	k8s.io/api v0.30.3
 	k8s.io/apiextensions-apiserver v0.30.3
@@ -25,10 +25,10 @@ require (
 	k8s.io/cli-runtime v0.29.2
 	k8s.io/client-go v0.30.3
 	k8s.io/code-generator v0.30.3
-	knative.dev/eventing v0.43.0
-	knative.dev/hack v0.0.0-20241010131451-05b2fb30cb4d
-	knative.dev/pkg v0.0.0-20241021183759-9b9d535af5ad
-	knative.dev/serving v0.43.0
+	knative.dev/eventing v0.43.1-0.20241213142054-414af5ce0fae
+	knative.dev/hack v0.0.0-20241128013751-1978b3a02667
+	knative.dev/pkg v0.0.0-20241128013618-f3ab5605e542
+	knative.dev/serving v0.43.1-0.20241209084051-2d5a1e99798a
 	sigs.k8s.io/yaml v1.4.0
 )
 
@@ -118,20 +118,20 @@ require (
 	github.com/xlab/treeprint v1.2.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
-	golang.org/x/mod v0.21.0 // indirect
-	golang.org/x/net v0.30.0 // indirect
-	golang.org/x/oauth2 v0.22.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/mod v0.22.0 // indirect
+	golang.org/x/net v0.31.0 // indirect
+	golang.org/x/oauth2 v0.23.0 // indirect
+	golang.org/x/sync v0.9.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
 	golang.org/x/time v0.6.0 // indirect
-	golang.org/x/tools v0.26.0 // indirect
+	golang.org/x/tools v0.27.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/api v0.183.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
-	google.golang.org/grpc v1.67.1 // indirect
-	google.golang.org/protobuf v1.35.1 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+	google.golang.org/grpc v1.68.0 // indirect
+	google.golang.org/protobuf v1.35.2 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -142,7 +142,7 @@ require (
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 // indirect
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
-	knative.dev/networking v0.0.0-20241022012959-60e29ff520dc // indirect
+	knative.dev/networking v0.0.0-20241118075147-929a5d5f19d0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
 	sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
diff --git a/go.sum b/go.sum
index 23bf836b..e59c867d 100644
--- a/go.sum
+++ b/go.sum
@@ -483,8 +483,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
+golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -520,8 +520,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
-golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -560,8 +560,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
+golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -573,8 +573,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
-golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -586,8 +586,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -639,13 +639,13 @@ golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
+golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -654,8 +654,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -711,8 +711,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
-golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o=
+golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -786,10 +786,10 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
+google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -806,8 +806,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
-google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -822,8 +822,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
-google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -881,16 +881,16 @@ k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 h1:1Wof1cGQgA5pqgo8MxKPtf
 k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8/go.mod h1:Os6V6dZwLNii3vxFpxcNaTmH8LJJBkOTg1N0tOA0fvA=
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/eventing v0.43.0 h1:GELHZ0yYosMeV78l4alMsd7HJciEu6a3T2C5l7MPi3Y=
-knative.dev/eventing v0.43.0/go.mod h1:pdrF+bEUfRkNn9ifWXS7DoVj5W31gA5KQVd8iwplXUo=
-knative.dev/hack v0.0.0-20241010131451-05b2fb30cb4d h1:aCfX7kwkvgGxXXGbso5tLqdwQmzBkJ9d+EIRwksKTvk=
-knative.dev/hack v0.0.0-20241010131451-05b2fb30cb4d/go.mod h1:R0ritgYtjLDO9527h5vb5X6gfvt5LCrJ55BNbVDsWiY=
-knative.dev/networking v0.0.0-20241022012959-60e29ff520dc h1:0d9XXRLlyuHfINZLlYqo/BYe/+chqqNBMLKJldjTbtw=
-knative.dev/networking v0.0.0-20241022012959-60e29ff520dc/go.mod h1:G56j6VCLzfaN9yZ4IqfNyN4c3U1czvhUmKeZX4UjQ8Q=
-knative.dev/pkg v0.0.0-20241021183759-9b9d535af5ad h1:Nrjtr2H168rJeamH4QdyLMV1lEKHejNhaj1ymgQMfLk=
-knative.dev/pkg v0.0.0-20241021183759-9b9d535af5ad/go.mod h1:StJI72GWcm/iErmk4RqFJiOo8RLbVqPbHxUqeVwAzeo=
-knative.dev/serving v0.43.0 h1:S+nCHYBaKo8r1kge6zF7hDxQrag5rwMkQTSZyDmrYIc=
-knative.dev/serving v0.43.0/go.mod h1:qYjwZdjv3SD7t+Tk/hvxml824G5njXZrycmCBBALpJk=
+knative.dev/eventing v0.43.1-0.20241213142054-414af5ce0fae h1:7xIqlU0Hf/iIfA2oTsoXP0i8AeElBIwC6jiVX7IOZvg=
+knative.dev/eventing v0.43.1-0.20241213142054-414af5ce0fae/go.mod h1:RxMFtxk903ZoxyS140MPdLLePTzBdeaGkVmBTB52t04=
+knative.dev/hack v0.0.0-20241128013751-1978b3a02667 h1:cp3GfEBnL0H2OrqdxLZ7nZ2K7U4PMdQhdBogl4Vd5+E=
+knative.dev/hack v0.0.0-20241128013751-1978b3a02667/go.mod h1:R0ritgYtjLDO9527h5vb5X6gfvt5LCrJ55BNbVDsWiY=
+knative.dev/networking v0.0.0-20241118075147-929a5d5f19d0 h1:3vj6wR95isnuqgjQzcclyrzaodv5Jvjc7xq4Bv0yde8=
+knative.dev/networking v0.0.0-20241118075147-929a5d5f19d0/go.mod h1:VvJGbKdlbEG6xr8q2LMLpiUlIt8OUiJZBRlT9yUq09w=
+knative.dev/pkg v0.0.0-20241128013618-f3ab5605e542 h1:x5cEwa34aOgQDnSTEE+UtRk5lND0cvb8KbVBUShwNuI=
+knative.dev/pkg v0.0.0-20241128013618-f3ab5605e542/go.mod h1:C2dxK66GlycMOS0SKqv0SMAnWkxsYbG4hkH32Xg1qD0=
+knative.dev/serving v0.43.1-0.20241209084051-2d5a1e99798a h1:N7yG6/VmuUitxqja8BatcIcqcDBq1Ew7OoTTf7wItY4=
+knative.dev/serving v0.43.1-0.20241209084051-2d5a1e99798a/go.mod h1:O39Rmsexjtgrqx2MwzH7cbOl+EP2Da9v+HHuh0N4uIs=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index 780968d6..e81b73e6 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -8,8 +8,8 @@ package http2
 
 import (
 	"context"
-	"crypto/tls"
 	"errors"
+	"net"
 	"net/http"
 	"sync"
 )
@@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) {
 // This code decides which ones live or die.
 // The return value used is whether c was used.
 // c is never closed.
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) {
 	p.mu.Lock()
 	for _, cc := range p.conns[key] {
 		if cc.CanTakeNewRequest() {
@@ -194,8 +194,8 @@ type addConnCall struct {
 	err  error
 }
 
-func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
-	cc, err := t.NewClientConn(tc)
+func (c *addConnCall) run(t *Transport, key string, nc net.Conn) {
+	cc, err := t.NewClientConn(nc)
 
 	p := c.p
 	p.mu.Lock()
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 617b4a47..832414b4 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -306,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error {
 	if s.TLSNextProto == nil {
 		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
 	}
-	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+	protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) {
 		if testHookOnConn != nil {
 			testHookOnConn()
 		}
@@ -323,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error {
 			ctx = bc.BaseContext()
 		}
 		conf.ServeConn(c, &ServeConnOpts{
-			Context:    ctx,
-			Handler:    h,
-			BaseConfig: hs,
+			Context:          ctx,
+			Handler:          h,
+			BaseConfig:       hs,
+			SawClientPreface: sawClientPreface,
 		})
 	}
-	s.TLSNextProto[NextProtoTLS] = protoHandler
+	s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+		protoHandler(hs, c, h, false)
+	}
+	// The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+	//
+	// A connection passed in this method has already had the HTTP/2 preface read from it.
+	s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+		nc, err := unencryptedNetConnFromTLSConn(c)
+		if err != nil {
+			if lg := hs.ErrorLog; lg != nil {
+				lg.Print(err)
+			} else {
+				log.Print(err)
+			}
+			go c.Close()
+			return
+		}
+		protoHandler(hs, nc, h, true)
+	}
 	return nil
 }
 
@@ -2880,6 +2899,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
 	return nil
 }
 
+func (w *responseWriter) EnableFullDuplex() error {
+	// We always support full duplex responses, so this is a no-op.
+	return nil
+}
+
 func (w *responseWriter) Flush() {
 	w.FlushError()
 }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 0c5f64aa..f5968f44 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -202,6 +202,20 @@ func (t *Transport) markNewGoroutine() {
 	}
 }
 
+func (t *Transport) now() time.Time {
+	if t != nil && t.transportTestHooks != nil {
+		return t.transportTestHooks.group.Now()
+	}
+	return time.Now()
+}
+
+func (t *Transport) timeSince(when time.Time) time.Duration {
+	if t != nil && t.transportTestHooks != nil {
+		return t.now().Sub(when)
+	}
+	return time.Since(when)
+}
+
 // newTimer creates a new time.Timer, or a synthetic timer in tests.
 func (t *Transport) newTimer(d time.Duration) timer {
 	if t.transportTestHooks != nil {
@@ -281,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) {
 	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
 		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
 	}
-	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
-		addr := authorityAddr("https", authority)
+	upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper {
+		addr := authorityAddr(scheme, authority)
 		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
 			go c.Close()
 			return erringRoundTripper{err}
@@ -293,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) {
 			// was unknown)
 			go c.Close()
 		}
+		if scheme == "http" {
+			return (*unencryptedTransport)(t2)
+		}
 		return t2
 	}
-	if m := t1.TLSNextProto; len(m) == 0 {
-		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
-			"h2": upgradeFn,
+	if t1.TLSNextProto == nil {
+		t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
+	}
+	t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper {
+		return upgradeFn("https", authority, c)
+	}
+	// The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+	t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper {
+		nc, err := unencryptedNetConnFromTLSConn(c)
+		if err != nil {
+			go c.Close()
+			return erringRoundTripper{err}
 		}
-	} else {
-		m["h2"] = upgradeFn
+		return upgradeFn("http", authority, nc)
 	}
 	return t2, nil
 }
 
+// unencryptedTransport is a Transport with a RoundTrip method that
+// always permits http:// URLs.
+type unencryptedTransport Transport
+
+func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true})
+}
+
 func (t *Transport) connPool() ClientConnPool {
 	t.connPoolOnce.Do(t.initConnPool)
 	return t.connPoolOrDef
@@ -324,7 +357,7 @@ type ClientConn struct {
 	t             *Transport
 	tconn         net.Conn             // usually *tls.Conn, except specialized impls
 	tlsState      *tls.ConnectionState // nil only for specialized impls
-	reused        uint32               // whether conn is being reused; atomic
+	atomicReused  uint32               // whether conn is being reused; atomic
 	singleUse     bool                 // whether being used for a single http.Request
 	getConnCalled bool                 // used by clientConnPool
 
@@ -364,6 +397,14 @@ type ClientConn struct {
 	readIdleTimeout             time.Duration
 	pingTimeout                 time.Duration
 
+	// pendingResets is the number of RST_STREAM frames we have sent to the peer,
+	// without confirming that the peer has received them. When we send a RST_STREAM,
+	// we bundle it with a PING frame, unless a PING is already in flight. We count
+	// the reset stream against the connection's concurrency limit until we get
+	// a PING response. This limits the number of requests we'll try to send to a
+	// completely unresponsive connection.
+	pendingResets int
+
 	// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
 	// Write to reqHeaderMu to lock it, read from it to unlock.
 	// Lock reqmu BEFORE mu or wmu.
@@ -420,12 +461,12 @@ type clientStream struct {
 	sentHeaders   bool
 
 	// owned by clientConnReadLoop:
-	firstByte    bool  // got the first response byte
-	pastHeaders  bool  // got first MetaHeadersFrame (actual headers)
-	pastTrailers bool  // got optional second MetaHeadersFrame (trailers)
-	num1xx       uint8 // number of 1xx responses seen
-	readClosed   bool  // peer sent an END_STREAM flag
-	readAborted  bool  // read loop reset the stream
+	firstByte       bool  // got the first response byte
+	pastHeaders     bool  // got first MetaHeadersFrame (actual headers)
+	pastTrailers    bool  // got optional second MetaHeadersFrame (trailers)
+	readClosed      bool  // peer sent an END_STREAM flag
+	readAborted     bool  // read loop reset the stream
+	totalHeaderSize int64 // total size of 1xx headers seen
 
 	trailer    http.Header  // accumulated trailers
 	resTrailer *http.Header // client's Response.Trailer
@@ -530,6 +571,8 @@ type RoundTripOpt struct {
 	// no cached connection is available, RoundTripOpt
 	// will return ErrNoCachedConn.
 	OnlyCachedConn bool
+
+	allowHTTP bool // allow http:// URLs
 }
 
 func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -562,7 +605,14 @@ func authorityAddr(scheme string, authority string) (addr string) {
 
 // RoundTripOpt is like RoundTrip, but takes options.
 func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
-	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+	switch req.URL.Scheme {
+	case "https":
+		// Always okay.
+	case "http":
+		if !t.AllowHTTP && !opt.allowHTTP {
+			return nil, errors.New("http2: unencrypted HTTP/2 not enabled")
+		}
+	default:
 		return nil, errors.New("http2: unsupported scheme")
 	}
 
@@ -573,7 +623,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
 			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
 			return nil, err
 		}
-		reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+		reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1)
 		traceGotConn(req, cc, reused)
 		res, err := cc.RoundTrip(req)
 		if err != nil && retry <= 6 {
@@ -598,6 +648,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
 				}
 			}
 		}
+		if err == errClientConnNotEstablished {
+			// This ClientConn was created recently,
+			// this is the first request to use it,
+			// and the connection is closed and not usable.
+			//
+			// In this state, cc.idleTimer will remove the conn from the pool
+			// when it fires. Stop the timer and remove it here so future requests
+			// won't try to use this connection.
+			//
+			// If the timer has already fired and we're racing it, the redundant
+			// call to MarkDead is harmless.
+			if cc.idleTimer != nil {
+				cc.idleTimer.Stop()
+			}
+			t.connPool().MarkDead(cc)
+		}
 		if err != nil {
 			t.vlogf("RoundTrip failure: %v", err)
 			return nil, err
@@ -616,9 +682,10 @@ func (t *Transport) CloseIdleConnections() {
 }
 
 var (
-	errClientConnClosed    = errors.New("http2: client conn is closed")
-	errClientConnUnusable  = errors.New("http2: client conn not usable")
-	errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+	errClientConnClosed         = errors.New("http2: client conn is closed")
+	errClientConnUnusable       = errors.New("http2: client conn not usable")
+	errClientConnNotEstablished = errors.New("http2: client conn could not be established")
+	errClientConnGotGoAway      = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
 )
 
 // shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -757,6 +824,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 		pingTimeout:                 conf.PingTimeout,
 		pings:                       make(map[[8]byte]chan struct{}),
 		reqHeaderMu:                 make(chan struct{}, 1),
+		lastActive:                  t.now(),
 	}
 	var group synctestGroupInterface
 	if t.transportTestHooks != nil {
@@ -960,7 +1028,7 @@ func (cc *ClientConn) State() ClientConnState {
 	return ClientConnState{
 		Closed:               cc.closed,
 		Closing:              cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil,
-		StreamsActive:        len(cc.streams),
+		StreamsActive:        len(cc.streams) + cc.pendingResets,
 		StreamsReserved:      cc.streamsReserved,
 		StreamsPending:       cc.pendingRequests,
 		LastIdle:             cc.lastIdle,
@@ -992,16 +1060,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
 		// writing it.
 		maxConcurrentOkay = true
 	} else {
-		maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams)
+		// We can take a new request if the total of
+		//   - active streams;
+		//   - reservation slots for new streams; and
+		//   - streams for which we have sent a RST_STREAM and a PING,
+		//     but received no subsequent frame
+		// is less than the concurrency limit.
+		maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams)
 	}
 
 	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
 		!cc.doNotReuse &&
 		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
 		!cc.tooIdleLocked()
+
+	// If this connection has never been used for a request and is closed,
+	// then let it take a request (which will fail).
+	//
+	// This avoids a situation where an error early in a connection's lifetime
+	// goes unreported.
+	if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
+		st.canTakeNewRequest = true
+	}
+
 	return
 }
 
+// currentRequestCountLocked reports the number of concurrency slots currently in use,
+// including active streams, reserved slots, and reset streams waiting for acknowledgement.
+func (cc *ClientConn) currentRequestCountLocked() int {
+	return len(cc.streams) + cc.streamsReserved + cc.pendingResets
+}
+
 func (cc *ClientConn) canTakeNewRequestLocked() bool {
 	st := cc.idleStateLocked()
 	return st.canTakeNewRequest
@@ -1014,7 +1104,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
 	// times are compared based on their wall time. We don't want
 	// to reuse a connection that's been sitting idle during
 	// VM/laptop suspend if monotonic time was also frozen.
-	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
 }
 
 // onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1578,6 +1668,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
 		cs.reqBodyClosed = make(chan struct{})
 	}
 	bodyClosed := cs.reqBodyClosed
+	closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
 	cc.mu.Unlock()
 	if mustCloseBody {
 		cs.reqBody.Close()
@@ -1602,16 +1693,40 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
 		if cs.sentHeaders {
 			if se, ok := err.(StreamError); ok {
 				if se.Cause != errFromPeer {
-					cc.writeStreamReset(cs.ID, se.Code, err)
+					cc.writeStreamReset(cs.ID, se.Code, false, err)
 				}
 			} else {
-				cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
+				// We're cancelling an in-flight request.
+				//
+				// This could be due to the server becoming unresponsive.
+				// To avoid sending too many requests on a dead connection,
+				// we let the request continue to consume a concurrency slot
+				// until we can confirm the server is still responding.
+				// We do this by sending a PING frame along with the RST_STREAM
+				// (unless a ping is already in flight).
+				//
+				// For simplicity, we don't bother tracking the PING payload:
+				// We reset cc.pendingResets any time we receive a PING ACK.
+				//
+				// We skip this if the conn is going to be closed on idle,
+				// because it's short lived and will probably be closed before
+				// we get the ping response.
+				ping := false
+				if !closeOnIdle {
+					cc.mu.Lock()
+					if cc.pendingResets == 0 {
+						ping = true
+					}
+					cc.pendingResets++
+					cc.mu.Unlock()
+				}
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err)
 			}
 		}
 		cs.bufPipe.CloseWithError(err) // no-op if already closed
 	} else {
 		if cs.sentHeaders && !cs.sentEndStream {
-			cc.writeStreamReset(cs.ID, ErrCodeNo, nil)
+			cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil)
 		}
 		cs.bufPipe.CloseWithError(errRequestCanceled)
 	}
@@ -1633,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
 // Must hold cc.mu.
 func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
 	for {
-		cc.lastActive = time.Now()
+		if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 {
+			// This is the very first request sent to this connection.
+			// Return a fatal error which aborts the retry loop.
+			return errClientConnNotEstablished
+		}
+		cc.lastActive = cc.t.now()
 		if cc.closed || !cc.canTakeNewRequestLocked() {
 			return errClientConnUnusable
 		}
 		cc.lastIdle = time.Time{}
-		if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
+		if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) {
 			return nil
 		}
 		cc.pendingRequests++
@@ -2180,10 +2300,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
 	if len(cc.streams) != slen-1 {
 		panic("forgetting unknown stream id")
 	}
-	cc.lastActive = time.Now()
+	cc.lastActive = cc.t.now()
 	if len(cc.streams) == 0 && cc.idleTimer != nil {
 		cc.idleTimer.Reset(cc.idleTimeout)
-		cc.lastIdle = time.Now()
+		cc.lastIdle = cc.t.now()
 	}
 	// Wake up writeRequestBody via clientStream.awaitFlowControl and
 	// wake up RoundTrip if there is a pending request.
@@ -2243,7 +2363,6 @@ func isEOFOrNetReadError(err error) bool {
 
 func (rl *clientConnReadLoop) cleanup() {
 	cc := rl.cc
-	cc.t.connPool().MarkDead(cc)
 	defer cc.closeConn()
 	defer close(cc.readerDone)
 
@@ -2267,6 +2386,24 @@ func (rl *clientConnReadLoop) cleanup() {
 	}
 	cc.closed = true
 
+	// If the connection has never been used, and has been open for only a short time,
+	// leave it in the connection pool for a little while.
+	//
+	// This avoids a situation where new connections are constantly created,
+	// added to the pool, fail, and are removed from the pool, without any error
+	// being surfaced to the user.
+	const unusedWaitTime = 5 * time.Second
+	idleTime := cc.t.now().Sub(cc.lastActive)
+	if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
+		cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+			cc.t.connPool().MarkDead(cc)
+		})
+	} else {
+		cc.mu.Unlock() // avoid any deadlocks in MarkDead
+		cc.t.connPool().MarkDead(cc)
+		cc.mu.Lock()
+	}
+
 	for _, cs := range cc.streams {
 		select {
 		case <-cs.peerClosed:
@@ -2494,15 +2631,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
 		if f.StreamEnded() {
 			return nil, errors.New("1xx informational response with END_STREAM flag")
 		}
-		cs.num1xx++
-		const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
-		if cs.num1xx > max1xxResponses {
-			return nil, errors.New("http2: too many 1xx informational responses")
-		}
 		if fn := cs.get1xxTraceFunc(); fn != nil {
+			// If the 1xx response is being delivered to the user,
+			// then they're responsible for limiting the number
+			// of responses.
 			if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
 				return nil, err
 			}
+		} else {
+			// If the user didn't examine the 1xx response, then we
+			// limit the size of all 1xx headers.
+			//
+			// This differs a bit from the HTTP/1 implementation, which
+			// limits the size of all 1xx headers plus the final response.
+			// Use the larger limit of MaxHeaderListSize and
+			// net/http.Transport.MaxResponseHeaderBytes.
+			limit := int64(cs.cc.t.maxHeaderListSize())
+			if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit {
+				limit = t1.MaxResponseHeaderBytes
+			}
+			for _, h := range f.Fields {
+				cs.totalHeaderSize += int64(h.Size())
+			}
+			if cs.totalHeaderSize > limit {
+				if VerboseLogs {
+					log.Printf("http2: 1xx informational responses too large")
+				}
+				return nil, errors.New("header list too large")
+			}
 		}
 		if statusCode == 100 {
 			traceGot100Continue(cs.trace)
@@ -3046,6 +3202,11 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
 			close(c)
 			delete(cc.pings, f.Data)
 		}
+		if cc.pendingResets > 0 {
+			// See clientStream.cleanupWriteRequest.
+			cc.pendingResets = 0
+			cc.cond.Broadcast()
+		}
 		return nil
 	}
 	cc := rl.cc
@@ -3068,13 +3229,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
 	return ConnectionError(ErrCodeProtocol)
 }
 
-func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+// writeStreamReset sends a RST_STREAM frame.
+// When ping is true, it also sends a PING frame with a random payload.
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) {
 	// TODO: map err to more interesting error codes, once the
 	// HTTP community comes up with some. But currently for
 	// RST_STREAM there's no equivalent to GOAWAY frame's debug
 	// data, and the error codes are all pretty vague ("cancel").
 	cc.wmu.Lock()
 	cc.fr.WriteRSTStream(streamID, code)
+	if ping {
+		var payload [8]byte
+		rand.Read(payload[:])
+		cc.fr.WritePing(false, payload)
+	}
 	cc.bw.Flush()
 	cc.wmu.Unlock()
 }
@@ -3228,7 +3396,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
 	cc.mu.Lock()
 	ci.WasIdle = len(cc.streams) == 0 && reused
 	if ci.WasIdle && !cc.lastActive.IsZero() {
-		ci.IdleTime = time.Since(cc.lastActive)
+		ci.IdleTime = cc.t.timeSince(cc.lastActive)
 	}
 	cc.mu.Unlock()
 
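An illustrative sketch of the 1xx behavior above (not part of the vendored change): the new header-size limit only applies when the caller has not registered an httptrace hook; a caller that opts in via net/http/httptrace takes responsibility for bounding informational responses itself. Names req and client below are an assumed *http.Request and *http.Client routed through this transport.

    trace := &httptrace.ClientTrace{
        Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
            // Called once per informational response; returning an error
            // aborts the request.
            log.Printf("got %d: %v", code, header)
            return nil
        },
    }
    req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
    resp, err := client.Do(req)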
diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go
new file mode 100644
index 00000000..b2de2116
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/unencrypted.go
@@ -0,0 +1,32 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"crypto/tls"
+	"errors"
+	"net"
+)
+
+const nextProtoUnencryptedHTTP2 = "unencrypted_http2"
+
+// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn.
+//
+// TLSNextProto functions accept a *tls.Conn.
+//
+// When passing an unencrypted HTTP/2 connection to a TLSNextProto function,
+// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection.
+// To be extra careful about mistakes (accidentally dropping TLS encryption in a place
+// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method
+// that returns the actual connection we want to use.
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) {
+	conner, ok := tc.NetConn().(interface {
+		UnencryptedNetConn() net.Conn
+	})
+	if !ok {
+		return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff")
+	}
+	return conner.UnencryptedNetConn(), nil
+}
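A minimal sketch (assumed names, illustrative only) of the wrapper shape the handoff above expects: a net.Conn exposing UnencryptedNetConn, which the caller would then carry inside a *tls.Conn (for example via tls.Client) purely as a container for the TLSNextProto callback.

    // unencryptedConn satisfies the interface asserted by
    // unencryptedNetConnFromTLSConn.
    type unencryptedConn struct {
        net.Conn
    }

    func (c unencryptedConn) UnencryptedNetConn() net.Conn { return c.Conn }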
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 5bbb3321..109997d7 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -49,6 +49,13 @@ type Token struct {
 	// mechanisms for that TokenSource will not be used.
 	Expiry time.Time `json:"expiry,omitempty"`
 
+	// ExpiresIn is the OAuth2 wire format "expires_in" field,
+	// which specifies how many seconds later the token expires,
+	// relative to an unknown time base approximately around "now".
+	// It is the application's responsibility to populate
+	// `Expiry` from `ExpiresIn` when required.
+	ExpiresIn int64 `json:"expires_in,omitempty"`
+
 	// raw optionally contains extra metadata from the server
 	// when updating a token.
 	raw interface{}
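A minimal sketch of what a caller might do with the new field, since the library documents that deriving Expiry is the application's responsibility; tok is assumed to be a *oauth2.Token freshly decoded from a token response.

    // Anchor the relative expires_in value to the local clock.
    if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
        tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
    }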
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go
index dbe680ea..7ca4fa12 100644
--- a/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
 	return &value, err
 }
 
+// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
+// association for the network device specified by ifname.
+func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return nil, err
+	}
+
+	value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
+	ifrd := ifr.withData(unsafe.Pointer(&value))
+
+	err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
+	return &value, err
+}
+
+// IoctlGetHwTstamp retrieves the hardware timestamping configuration
+// for the network device specified by ifname.
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return nil, err
+	}
+
+	value := HwTstampConfig{}
+	ifrd := ifr.withData(unsafe.Pointer(&value))
+
+	err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
+	return &value, err
+}
+
+// IoctlSetHwTstamp updates the hardware timestamping configuration for
+// the network device specified by ifname.
+func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return err
+	}
+	ifrd := ifr.withData(unsafe.Pointer(cfg))
+	return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
+}
+
+// FdToClockID derives the clock ID from the file descriptor number
+// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is
+// suitable for system calls like ClockGettime.
+func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }
+
+// IoctlPtpClockGetcaps returns the description of a given PTP device.
+func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
+	var value PtpClockCaps
+	err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpSysOffsetPrecise returns a description of the clock
+// offset compared to the system clock.
+func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
+	var value PtpSysOffsetPrecise
+	err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpSysOffsetExtended returns an extended description of the
+// clock offset compared to the system clock. The samples parameter
+// specifies the desired number of measurements.
+func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
+	value := PtpSysOffsetExtended{Samples: uint32(samples)}
+	err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpPinGetfunc returns the configuration of the specified
+// I/O pin on the given PTP device.
+func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
+	value := PtpPinDesc{Index: uint32(index)}
+	err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpPinSetfunc updates the configuration of the specified
+// PTP I/O pin.
+func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
+	return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
+}
+
+// IoctlPtpPeroutRequest configures the periodic output mode of the
+// PTP I/O pins.
+func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
+	return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
+}
+
+// IoctlPtpExttsRequest configures the external timestamping mode
+// of the PTP I/O pins.
+func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
+	return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
+}
+
 // IoctlGetWatchdogInfo fetches information about a watchdog device from the
 // Linux watchdog API. For more information, see:
 // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
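A hedged usage sketch for the new PTP wrappers (illustrative only): the device path /dev/ptp0 is an assumption, and PTP_SYS_OFFSET_PRECISE2 is not supported by every driver.

    fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
    if err != nil {
        log.Fatal(err)
    }
    defer unix.Close(fd)

    caps, err := unix.IoctlPtpClockGetcaps(fd)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("pins=%d max_adj=%d", caps.N_pins, caps.Max_adj)

    if off, err := unix.IoctlPtpSysOffsetPrecise(fd); err == nil {
        log.Printf("phc=%d.%09d sys=%d.%09d",
            off.Device.Sec, off.Device.Nsec,
            off.Realtime.Sec, off.Realtime.Nsec)
    }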
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index ac54ecab..6ab02b6c 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -158,6 +158,16 @@ includes_Linux='
 #endif
 #define _GNU_SOURCE
 
+// See the description in unix/linux/types.go
+#if defined(__ARM_EABI__) || \
+	(defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
+	(defined(__powerpc__) && (!defined(__powerpc64__)))
+# ifdef   _TIME_BITS
+#  undef  _TIME_BITS
+# endif
+# define  _TIME_BITS 32
+#endif
+
 // <sys/ioctl.h> is broken on powerpc64, as it fails to include definitions of
 // these structures. We just include them copied from <bits/termios.h>.
 #if defined(__powerpc__)
@@ -256,6 +266,7 @@ struct ltchars {
 #include <linux/nsfs.h>
 #include <linux/perf_event.h>
 #include <linux/pps.h>
+#include <linux/ptp_clock.h>
 #include <linux/ptrace.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
@@ -527,6 +538,7 @@ ccflags="$@"
 		$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
 		$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
 		$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
+		$2 ~ /^PTP_/ ||
 		$2 ~ /^RAW_PAYLOAD_/ ||
 		$2 ~ /^[US]F_/ ||
 		$2 ~ /^TP_STATUS_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index f08abd43..230a9454 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys	ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
 //sys	ClockGetres(clockid int32, res *Timespec) (err error)
 //sys	ClockGettime(clockid int32, time *Timespec) (err error)
+//sys	ClockSettime(clockid int32, time *Timespec) (err error)
 //sys	ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	CloseRange(first uint, last uint, flags uint) (err error)
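The new ClockSettime wrapper complements the existing ClockGettime; a small sketch (illustrative only, /dev/ptp0 assumed) of reading a PTP hardware clock through its dynamic clock ID using FdToClockID from the ioctl changes above:

    fd, err := unix.Open("/dev/ptp0", unix.O_RDONLY, 0)
    if err != nil {
        log.Fatal(err)
    }
    defer unix.Close(fd)

    var ts unix.Timespec
    if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
        log.Fatal(err)
    }
    log.Printf("phc time: %d.%09d", ts.Sec, ts.Nsec)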
diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index 312ae6ac..7bf5c04b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
 
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+	xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+	return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+	return mapper.munmap(uintptr(addr), length)
+}
+
 //sys   Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
 //sysnb	Getgid() (gid int)
 //sysnb	Getpid() (pid int)
@@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) {
 // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/
 func isSpecialPath(path []byte) (v bool) {
 	var special = [4][8]byte{
-		[8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
-		[8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
+		{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
+		{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
 
 	var i, j int
 	for i = 0; i < len(special); i++ {
@@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) {
 //sys	Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT
 //sys	Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT
 //sys	Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT
+
+func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) {
+	runtime.EnterSyscall()
+	r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg)
+	runtime.ExitSyscall()
+	val = int(r0)
+	if int64(r0) == -1 {
+		err = errnoErr2(e1, e2)
+	}
+	return
+}
+
+func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) {
+	switch op := op.(type) {
+	case *Flock_t:
+		err = FcntlFlock(fd, cmd, op)
+		if err != nil {
+			ret = -1
+		}
+		return
+	case int:
+		return FcntlInt(fd, cmd, op)
+	case *F_cnvrt:
+		return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op)))
+	case unsafe.Pointer:
+		return fcntlAsIs(fd, cmd, uintptr(op))
+	default:
+		return -1, EINVAL
+	}
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	return sendfile(outfd, infd, offset, count)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	// TODO: use LE call instead if the call is implemented
+	originalOffset, err := Seek(infd, 0, SEEK_CUR)
+	if err != nil {
+		return -1, err
+	}
+	// Start reading data from in_fd.
+	if offset != nil {
+		_, err := Seek(infd, *offset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	buf := make([]byte, count)
+	readBuf := make([]byte, 0, count)
+	var n int
+	for len(readBuf) < count {
+		n, err = Read(infd, buf[:count-len(readBuf)])
+		if n == 0 {
+			if err != nil {
+				return -1, err
+			}
+			break // EOF
+		}
+		readBuf = append(readBuf, buf[:n]...)
+	}
+
+	n2, err := Write(outfd, readBuf)
+	if err != nil {
+		return -1, err
+	}
+
+	// When sendfile() returns, offset is set to the offset of the byte
+	// following the last byte that was read.
+	if offset != nil {
+		*offset = *offset + int64(len(readBuf))
+		// If offset is not NULL, then sendfile() does not modify the file
+		// offset of in_fd
+		_, err := Seek(infd, originalOffset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+	return n2, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index de3b4624..ccba391c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -2625,6 +2625,28 @@ const (
 	PR_UNALIGN_NOPRINT                          = 0x1
 	PR_UNALIGN_SIGBUS                           = 0x2
 	PSTOREFS_MAGIC                              = 0x6165676c
+	PTP_CLK_MAGIC                               = '='
+	PTP_ENABLE_FEATURE                          = 0x1
+	PTP_EXTTS_EDGES                             = 0x6
+	PTP_EXTTS_EVENT_VALID                       = 0x1
+	PTP_EXTTS_V1_VALID_FLAGS                    = 0x7
+	PTP_EXTTS_VALID_FLAGS                       = 0x1f
+	PTP_EXT_OFFSET                              = 0x10
+	PTP_FALLING_EDGE                            = 0x4
+	PTP_MAX_SAMPLES                             = 0x19
+	PTP_PEROUT_DUTY_CYCLE                       = 0x2
+	PTP_PEROUT_ONE_SHOT                         = 0x1
+	PTP_PEROUT_PHASE                            = 0x4
+	PTP_PEROUT_V1_VALID_FLAGS                   = 0x0
+	PTP_PEROUT_VALID_FLAGS                      = 0x7
+	PTP_PIN_GETFUNC                             = 0xc0603d06
+	PTP_PIN_GETFUNC2                            = 0xc0603d0f
+	PTP_RISING_EDGE                             = 0x2
+	PTP_STRICT_FLAGS                            = 0x8
+	PTP_SYS_OFFSET_EXTENDED                     = 0xc4c03d09
+	PTP_SYS_OFFSET_EXTENDED2                    = 0xc4c03d12
+	PTP_SYS_OFFSET_PRECISE                      = 0xc0403d08
+	PTP_SYS_OFFSET_PRECISE2                     = 0xc0403d11
 	PTRACE_ATTACH                               = 0x10
 	PTRACE_CONT                                 = 0x7
 	PTRACE_DETACH                               = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 8aa6d77c..0c00cb3f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETFPXREGS                = 0x12
 	PTRACE_GET_THREAD_AREA           = 0x19
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index da428f42..dfb36455 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_ARCH_PRCTL                = 0x1e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETFPXREGS                = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index bf45bfec..d46dcf78 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_GETCRUNCHREGS             = 0x19
 	PTRACE_GETFDPIC                  = 0x1f
 	PTRACE_GETFDPIC_EXEC             = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 71c67162..3af3248a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -240,6 +240,20 @@ const (
 	PROT_BTI                         = 0x10
 	PROT_MTE                         = 0x20
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_PEEKMTETAGS               = 0x21
 	PTRACE_POKEMTETAGS               = 0x22
 	PTRACE_SYSEMU                    = 0x1f
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9476628f..292bcf02 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -238,6 +238,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_SYSEMU                    = 0x1f
 	PTRACE_SYSEMU_SINGLESTEP         = 0x20
 	RLIMIT_AS                        = 0x9
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index b9e85f3c..782b7110 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GET_THREAD_AREA           = 0x19
 	PTRACE_GET_THREAD_AREA_3264      = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index a48b68a7..84973fd9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GET_THREAD_AREA           = 0x19
 	PTRACE_GET_THREAD_AREA_3264      = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ea00e852..6d9cbc3b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GET_THREAD_AREA           = 0x19
 	PTRACE_GET_THREAD_AREA_3264      = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 91c64687..5f9fedbc 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GET_THREAD_AREA           = 0x19
 	PTRACE_GET_THREAD_AREA_3264      = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 8cbf38d6..bb0026ee 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PROT_SAO                         = 0x10
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETEVRREGS                = 0x14
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETREGS64                 = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index a2df7341..46120db5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PROT_SAO                         = 0x10
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETEVRREGS                = 0x14
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETREGS64                 = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 24791379..5c951634 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PROT_SAO                         = 0x10
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETEVRREGS                = 0x14
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETREGS64                 = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index d265f146..11a84d5a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_GETFDPIC                  = 0x21
 	PTRACE_GETFDPIC_EXEC             = 0x0
 	PTRACE_GETFDPIC_INTERP           = 0x1
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 3f2d6443..f78c4617 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_DISABLE_TE                = 0x5010
 	PTRACE_ENABLE_TE                 = 0x5009
 	PTRACE_GET_LAST_BREAK            = 0x5006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 5d8b727a..aeb777c3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -239,6 +239,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPAREGS                = 0x14
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETFPREGS64               = 0x19
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index af30da55..5cc1e8eb 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ClockSettime(clockid int32, time *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
 	_, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 3a69e454..8daaf3fa 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -1752,12 +1752,6 @@ const (
 	IFLA_IPVLAN_UNSPEC                         = 0x0
 	IFLA_IPVLAN_MODE                           = 0x1
 	IFLA_IPVLAN_FLAGS                          = 0x2
-	NETKIT_NEXT                                = -0x1
-	NETKIT_PASS                                = 0x0
-	NETKIT_DROP                                = 0x2
-	NETKIT_REDIRECT                            = 0x7
-	NETKIT_L2                                  = 0x0
-	NETKIT_L3                                  = 0x1
 	IFLA_NETKIT_UNSPEC                         = 0x0
 	IFLA_NETKIT_PEER_INFO                      = 0x1
 	IFLA_NETKIT_PRIMARY                        = 0x2
@@ -1796,6 +1790,7 @@ const (
 	IFLA_VXLAN_DF                              = 0x1d
 	IFLA_VXLAN_VNIFILTER                       = 0x1e
 	IFLA_VXLAN_LOCALBYPASS                     = 0x1f
+	IFLA_VXLAN_LABEL_POLICY                    = 0x20
 	IFLA_GENEVE_UNSPEC                         = 0x0
 	IFLA_GENEVE_ID                             = 0x1
 	IFLA_GENEVE_REMOTE                         = 0x2
@@ -1825,6 +1820,8 @@ const (
 	IFLA_GTP_ROLE                              = 0x4
 	IFLA_GTP_CREATE_SOCKETS                    = 0x5
 	IFLA_GTP_RESTART_COUNT                     = 0x6
+	IFLA_GTP_LOCAL                             = 0x7
+	IFLA_GTP_LOCAL6                            = 0x8
 	IFLA_BOND_UNSPEC                           = 0x0
 	IFLA_BOND_MODE                             = 0x1
 	IFLA_BOND_ACTIVE_SLAVE                     = 0x2
@@ -1857,6 +1854,7 @@ const (
 	IFLA_BOND_AD_LACP_ACTIVE                   = 0x1d
 	IFLA_BOND_MISSED_MAX                       = 0x1e
 	IFLA_BOND_NS_IP6_TARGET                    = 0x1f
+	IFLA_BOND_COUPLED_CONTROL                  = 0x20
 	IFLA_BOND_AD_INFO_UNSPEC                   = 0x0
 	IFLA_BOND_AD_INFO_AGGREGATOR               = 0x1
 	IFLA_BOND_AD_INFO_NUM_PORTS                = 0x2
@@ -1925,6 +1923,7 @@ const (
 	IFLA_HSR_SEQ_NR                            = 0x5
 	IFLA_HSR_VERSION                           = 0x6
 	IFLA_HSR_PROTOCOL                          = 0x7
+	IFLA_HSR_INTERLINK                         = 0x8
 	IFLA_STATS_UNSPEC                          = 0x0
 	IFLA_STATS_LINK_64                         = 0x1
 	IFLA_STATS_LINK_XSTATS                     = 0x2
@@ -1977,6 +1976,15 @@ const (
 	IFLA_DSA_MASTER                            = 0x1
 )
 
+const (
+	NETKIT_NEXT     = -0x1
+	NETKIT_PASS     = 0x0
+	NETKIT_DROP     = 0x2
+	NETKIT_REDIRECT = 0x7
+	NETKIT_L2       = 0x0
+	NETKIT_L3       = 0x1
+)
+
 const (
 	NF_INET_PRE_ROUTING  = 0x0
 	NF_INET_LOCAL_IN     = 0x1
@@ -4110,6 +4118,106 @@ type EthtoolDrvinfo struct {
 	Regdump_len  uint32
 }
 
+type EthtoolTsInfo struct {
+	Cmd             uint32
+	So_timestamping uint32
+	Phc_index       int32
+	Tx_types        uint32
+	Tx_reserved     [3]uint32
+	Rx_filters      uint32
+	Rx_reserved     [3]uint32
+}
+
+type HwTstampConfig struct {
+	Flags     int32
+	Tx_type   int32
+	Rx_filter int32
+}
+
+const (
+	HWTSTAMP_FILTER_NONE            = 0x0
+	HWTSTAMP_FILTER_ALL             = 0x1
+	HWTSTAMP_FILTER_SOME            = 0x2
+	HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3
+	HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6
+	HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9
+	HWTSTAMP_FILTER_PTP_V2_EVENT    = 0xc
+)
+
+const (
+	HWTSTAMP_TX_OFF          = 0x0
+	HWTSTAMP_TX_ON           = 0x1
+	HWTSTAMP_TX_ONESTEP_SYNC = 0x2
+)
+
+type (
+	PtpClockCaps struct {
+		Max_adj            int32
+		N_alarm            int32
+		N_ext_ts           int32
+		N_per_out          int32
+		Pps                int32
+		N_pins             int32
+		Cross_timestamping int32
+		Adjust_phase       int32
+		Max_phase_adj      int32
+		Rsv                [11]int32
+	}
+	PtpClockTime struct {
+		Sec      int64
+		Nsec     uint32
+		Reserved uint32
+	}
+	PtpExttsEvent struct {
+		T     PtpClockTime
+		Index uint32
+		Flags uint32
+		Rsv   [2]uint32
+	}
+	PtpExttsRequest struct {
+		Index uint32
+		Flags uint32
+		Rsv   [2]uint32
+	}
+	PtpPeroutRequest struct {
+		StartOrPhase PtpClockTime
+		Period       PtpClockTime
+		Index        uint32
+		Flags        uint32
+		On           PtpClockTime
+	}
+	PtpPinDesc struct {
+		Name  [64]byte
+		Index uint32
+		Func  uint32
+		Chan  uint32
+		Rsv   [5]uint32
+	}
+	PtpSysOffset struct {
+		Samples uint32
+		Rsv     [3]uint32
+		Ts      [51]PtpClockTime
+	}
+	PtpSysOffsetExtended struct {
+		Samples uint32
+		Rsv     [3]uint32
+		Ts      [25][3]PtpClockTime
+	}
+	PtpSysOffsetPrecise struct {
+		Device   PtpClockTime
+		Realtime PtpClockTime
+		Monoraw  PtpClockTime
+		Rsv      [4]uint32
+	}
+)
+
+const (
+	PTP_PF_NONE    = 0x0
+	PTP_PF_EXTTS   = 0x1
+	PTP_PF_PEROUT  = 0x2
+	PTP_PF_PHYSYNC = 0x3
+)
+
 type (
 	HIDRawReportDescriptor struct {
 		Size  uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
index d9a13af4..2e5d5a44 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
@@ -377,6 +377,12 @@ type Flock_t struct {
 	Pid    int32
 }
 
+type F_cnvrt struct {
+	Cvtcmd int32
+	Pccsid int16
+	Fccsid int16
+}
+
 type Termios struct {
 	Cflag uint32
 	Iflag uint32
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 5cee9a31..4510bfc3 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration {
 }
 
 func Ftruncate(fd Handle, length int64) (err error) {
-	curoffset, e := Seek(fd, 0, 1)
-	if e != nil {
-		return e
-	}
-	defer Seek(fd, curoffset, 0)
-	_, e = Seek(fd, length, 0)
-	if e != nil {
-		return e
+	type _FILE_END_OF_FILE_INFO struct {
+		EndOfFile int64
 	}
-	e = SetEndOfFile(fd)
-	if e != nil {
-		return e
-	}
-	return nil
+	var info _FILE_END_OF_FILE_INFO
+	info.EndOfFile = length
+	return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
 }
 
 func Gettimeofday(tv *Timeval) (err error) {
@@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0))
 //sys	GetACP() (acp uint32) = kernel32.GetACP
 //sys	MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
 //sys	getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
+//sys   GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys   GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys   NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys   NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
+//sys   CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
 
 // For testing: clients can set this flag to force
 // creation of IPv6 sockets to return EAFNOSUPPORT.
@@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string {
 // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for
 // the more common *uint16 string type.
 func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
-	var u NTUnicodeString
-	s16, err := UTF16PtrFromString(s)
+	s16, err := UTF16FromString(s)
 	if err != nil {
 		return nil, err
 	}
-	RtlInitUnicodeString(&u, s16)
-	return &u, nil
+	n := uint16(len(s16) * 2)
+	return &NTUnicodeString{
+		Length:        n - 2, // subtract 2 bytes for the NULL terminator
+		MaximumLength: n,
+		Buffer:        &s16[0],
+	}, nil
 }
 
 // Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
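A short worked example of the new length math (illustrative only): the slice returned by UTF16FromString includes the terminating NUL, so for "abc" it has 4 elements and the structure is sized in bytes as follows.

    s16, _ := windows.UTF16FromString("abc") // []uint16{'a', 'b', 'c', 0}
    n := uint16(len(s16) * 2)                // 8 bytes, NUL included
    // Length        = n - 2 = 6 bytes (excludes the NUL terminator)
    // MaximumLength = n     = 8 bytes (includes the NUL terminator)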
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 7b97a154..51311e20 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -2203,6 +2203,132 @@ const (
 	IfOperStatusLowerLayerDown = 7
 )
 
+const (
+	IF_MAX_PHYS_ADDRESS_LENGTH = 32
+	IF_MAX_STRING_SIZE         = 256
+)
+
+// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex.
+const (
+	MibIfEntryNormal                  = 0
+	MibIfEntryNormalWithoutStatistics = 2
+)
+
+// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type.
+const (
+	MibParameterNotification = 0
+	MibAddInstance           = 1
+	MibDeleteInstance        = 2
+	MibInitialNotification   = 3
+)
+
+// MibIfRow2 stores information about a particular interface. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2.
+type MibIfRow2 struct {
+	InterfaceLuid               uint64
+	InterfaceIndex              uint32
+	InterfaceGuid               GUID
+	Alias                       [IF_MAX_STRING_SIZE + 1]uint16
+	Description                 [IF_MAX_STRING_SIZE + 1]uint16
+	PhysicalAddressLength       uint32
+	PhysicalAddress             [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+	PermanentPhysicalAddress    [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+	Mtu                         uint32
+	Type                        uint32
+	TunnelType                  uint32
+	MediaType                   uint32
+	PhysicalMediumType          uint32
+	AccessType                  uint32
+	DirectionType               uint32
+	InterfaceAndOperStatusFlags uint8
+	OperStatus                  uint32
+	AdminStatus                 uint32
+	MediaConnectState           uint32
+	NetworkGuid                 GUID
+	ConnectionType              uint32
+	TransmitLinkSpeed           uint64
+	ReceiveLinkSpeed            uint64
+	InOctets                    uint64
+	InUcastPkts                 uint64
+	InNUcastPkts                uint64
+	InDiscards                  uint64
+	InErrors                    uint64
+	InUnknownProtos             uint64
+	InUcastOctets               uint64
+	InMulticastOctets           uint64
+	InBroadcastOctets           uint64
+	OutOctets                   uint64
+	OutUcastPkts                uint64
+	OutNUcastPkts               uint64
+	OutDiscards                 uint64
+	OutErrors                   uint64
+	OutUcastOctets              uint64
+	OutMulticastOctets          uint64
+	OutBroadcastOctets          uint64
+	OutQLen                     uint64
+}
+
+// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
+type MibUnicastIpAddressRow struct {
+	Address            RawSockaddrInet6 // SOCKADDR_INET union
+	InterfaceLuid      uint64
+	InterfaceIndex     uint32
+	PrefixOrigin       uint32
+	SuffixOrigin       uint32
+	ValidLifetime      uint32
+	PreferredLifetime  uint32
+	OnLinkPrefixLength uint8
+	SkipAsSource       uint8
+	DadState           uint32
+	ScopeId            uint32
+	CreationTimeStamp  Filetime
+}
+
+const ScopeLevelCount = 16
+
+// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface.
+// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row.
+type MibIpInterfaceRow struct {
+	Family                               uint16
+	InterfaceLuid                        uint64
+	InterfaceIndex                       uint32
+	MaxReassemblySize                    uint32
+	InterfaceIdentifier                  uint64
+	MinRouterAdvertisementInterval       uint32
+	MaxRouterAdvertisementInterval       uint32
+	AdvertisingEnabled                   uint8
+	ForwardingEnabled                    uint8
+	WeakHostSend                         uint8
+	WeakHostReceive                      uint8
+	UseAutomaticMetric                   uint8
+	UseNeighborUnreachabilityDetection   uint8
+	ManagedAddressConfigurationSupported uint8
+	OtherStatefulConfigurationSupported  uint8
+	AdvertiseDefaultRoute                uint8
+	RouterDiscoveryBehavior              uint32
+	DadTransmits                         uint32
+	BaseReachableTime                    uint32
+	RetransmitTime                       uint32
+	PathMtuDiscoveryTimeout              uint32
+	LinkLocalAddressBehavior             uint32
+	LinkLocalAddressTimeout              uint32
+	ZoneIndices                          [ScopeLevelCount]uint32
+	SitePrefixLength                     uint32
+	Metric                               uint32
+	NlMtu                                uint32
+	Connected                            uint8
+	SupportsWakeUpPatterns               uint8
+	SupportsNeighborDiscovery            uint8
+	SupportsRouterDiscovery              uint8
+	ReachableTime                        uint32
+	TransmitOffload                      uint32
+	ReceiveOffload                       uint32
+	DisableDefaultRoutes                 uint8
+}
+
 // Console related constants used for the mode parameter to SetConsoleMode. See
 // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.
 
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 4c2e1bdc..6f525288 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -181,10 +181,15 @@ var (
 	procDnsRecordListFree                                    = moddnsapi.NewProc("DnsRecordListFree")
 	procDwmGetWindowAttribute                                = moddwmapi.NewProc("DwmGetWindowAttribute")
 	procDwmSetWindowAttribute                                = moddwmapi.NewProc("DwmSetWindowAttribute")
+	procCancelMibChangeNotify2                               = modiphlpapi.NewProc("CancelMibChangeNotify2")
 	procGetAdaptersAddresses                                 = modiphlpapi.NewProc("GetAdaptersAddresses")
 	procGetAdaptersInfo                                      = modiphlpapi.NewProc("GetAdaptersInfo")
 	procGetBestInterfaceEx                                   = modiphlpapi.NewProc("GetBestInterfaceEx")
 	procGetIfEntry                                           = modiphlpapi.NewProc("GetIfEntry")
+	procGetIfEntry2Ex                                        = modiphlpapi.NewProc("GetIfEntry2Ex")
+	procGetUnicastIpAddressEntry                             = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
+	procNotifyIpInterfaceChange                              = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+	procNotifyUnicastIpAddressChange                         = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
 	procAddDllDirectory                                      = modkernel32.NewProc("AddDllDirectory")
 	procAssignProcessToJobObject                             = modkernel32.NewProc("AssignProcessToJobObject")
 	procCancelIo                                             = modkernel32.NewProc("CancelIo")
@@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
 	return
 }
 
+func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
 func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
 	r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
 	if r0 != 0 {
@@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
 	return
 }
 
+func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
 func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
 	r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
 	cookie = uintptr(r0)
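The new iphlpapi wrappers (GetIfEntry2Ex, GetUnicastIpAddressEntry, NotifyIpInterfaceChange, NotifyUnicastIpAddressChange, CancelMibChangeNotify2) expose the MIB interface tables and change notifications declared above. A minimal sketch of querying one row (Windows-only; the interface index is an assumed example value):

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Ask for the row of interface index 1, skipping per-packet statistics.
	var row windows.MibIfRow2
	row.InterfaceIndex = 1 // example index; real code would enumerate interfaces first
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormalWithoutStatistics, &row); err != nil {
		panic(err)
	}
	fmt.Println(windows.UTF16ToString(row.Alias[:]), row.Mtu, row.OperStatus)
}
```

Change notifications follow the same shape: pass a callback created with windows.NewCallback to NotifyIpInterfaceChange, and release the registration later with CancelMibChangeNotify2.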
diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md
index d03d0aef..05ff623f 100644
--- a/vendor/golang.org/x/term/README.md
+++ b/vendor/golang.org/x/term/README.md
@@ -4,16 +4,13 @@
 
 This repository provides Go terminal and console support packages.
 
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/term`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/term`.
-
 ## Report Issues / Send Patches
 
 This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
+this repository, see https://go.dev/doc/contribute.
+
+The git repository is https://go.googlesource.com/term.
 
 The main issue tracker for the term repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the
+https://go.dev/issues. Prefix your issue with "x/term:" in the
 subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
index 18d1adb0..a6b5ed0a 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
@@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r
 }
 
 // UsesImport reports whether a given import is used.
+// The provided File must have been parsed with syntactic object resolution
+// (not using go/parser.SkipObjectResolution).
 func UsesImport(f *ast.File, path string) (used bool) {
+	if f.Scope == nil {
+		panic("file f was not parsed with syntactic object resolution")
+	}
 	spec := importSpec(f, path)
 	if spec == nil {
 		return
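The added guard makes the precondition explicit: UsesImport walks f.Scope, so the file must be parsed with syntactic object resolution. A small self-contained sketch of a call that satisfies it (the default parser mode keeps object resolution):

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

const src = `package p

import "fmt"

func f() { fmt.Println("hi") }
`

func main() {
	fset := token.NewFileSet()
	// Mode 0 performs object resolution, so f.Scope is non-nil and the
	// new panic in UsesImport is not triggered.
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(astutil.UsesImport(f, "fmt")) // true
}
```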
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index 137cc8df..f3ab0a2e 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -2,22 +2,64 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package gcexportdata provides functions for locating, reading, and
-// writing export data files containing type information produced by the
-// gc compiler.  This package supports go1.7 export data format and all
-// later versions.
-//
-// Although it might seem convenient for this package to live alongside
-// go/types in the standard library, this would cause version skew
-// problems for developer tools that use it, since they must be able to
-// consume the outputs of the gc compiler both before and after a Go
-// update such as from Go 1.7 to Go 1.8.  Because this package lives in
-// golang.org/x/tools, sites can update their version of this repo some
-// time before the Go 1.8 release and rebuild and redeploy their
-// developer tools, which will then be able to consume both Go 1.7 and
-// Go 1.8 export data files, so they will work before and after the
-// Go update. (See discussion at https://golang.org/issue/15651.)
-package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
+// Package gcexportdata provides functions for reading and writing
+// export data, which is a serialized description of the API of a Go
+// package including the names, kinds, types, and locations of all
+// exported declarations.
+//
+// The standard Go compiler (cmd/compile) writes an export data file
+// for each package it compiles, which it later reads when compiling
+// packages that import the earlier one. The compiler must thus
+// contain logic to both write and read export data.
+// (See the "Export" section in the cmd/compile/README file.)
+//
+// The [Read] function in this package can read files produced by the
+// compiler, producing [go/types] data structures. As a matter of
+// policy, Read supports export data files produced by only the last
+// two Go releases plus tip; see https://go.dev/issue/68898. The
+// export data files produced by the compiler contain additional
+// details related to generics, inlining, and other optimizations that
+// cannot be decoded by the [Read] function.
+//
+// In files written by the compiler, the export data is not at the
+// start of the file. Before calling Read, use [NewReader] to locate
+// the desired portion of the file.
+//
+// The [Write] function in this package encodes the exported API of a
+// Go package ([types.Package]) as a file. Such files can be later
+// decoded by Read, but cannot be consumed by the compiler.
+//
+// # Future changes
+//
+// Although Read supports the formats written by both Write and the
+// compiler, the two are quite different, and there is an open
+// proposal (https://go.dev/issue/69491) to separate these APIs.
+//
+// Under that proposal, this package would ultimately provide only the
+// Read operation for compiler export data, which must be defined in
+// this module (golang.org/x/tools), not in the standard library, to
+// avoid version skew for developer tools that need to read compiler
+// export data both before and after a Go release, such as from Go
+// 1.23 to Go 1.24. Because this package lives in the tools module,
+// clients can update their version of the module some time before the
+// Go 1.24 release and rebuild and redeploy their tools, which will
+// then be able to consume both Go 1.23 and Go 1.24 export data files,
+// so they will work before and after the Go update. (See discussion
+// at https://go.dev/issue/15651.)
+//
+// The operations to import and export [go/types] data structures
+// would be defined in the go/types package as Import and Export.
+// [Write] would (eventually) delegate to Export,
+// and [Read], when it detects a file produced by Export,
+// would delegate to Import.
+//
+// # Deprecations
+//
+// The [NewImporter] and [Find] functions are deprecated and should
+// not be used in new code. The [WriteBundle] and [ReadBundle]
+// functions are experimental, and there is an open proposal to
+// deprecate them (https://go.dev/issue/69573).
+package gcexportdata
 
 import (
 	"bufio"
@@ -100,6 +142,11 @@ func readAll(r io.Reader) ([]byte, error) {
 // Read reads export data from in, decodes it, and returns type
 // information for the package.
 //
+// Read is capable of reading export data produced by [Write] at the
+// same source code version, or by the last two Go releases (plus tip)
+// of the standard Go compiler. Reading files from older compilers may
+// produce an error.
+//
 // The package path (effectively its linker symbol prefix) is
 // specified by path, since unlike the package name, this information
 // may not be recorded in the export data.
@@ -128,14 +175,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
 	// (from "version"). Select appropriate importer.
 	if len(data) > 0 {
 		switch data[0] {
-		case 'v', 'c', 'd': // binary, till go1.10
+		case 'v', 'c', 'd':
+			// binary, produced by cmd/compile till go1.10
 			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
 
-		case 'i': // indexed, till go1.19
+		case 'i':
+			// indexed, produced by cmd/compile till go1.19,
+			// and also by [Write].
+			//
+			// If proposal #69491 is accepted, go/types
+			// serialization will be implemented by
+			// types.Export, to which Write would eventually
+			// delegate (explicitly dropping any pretence at
+			// inter-version Write-Read compatibility).
+			// This [Read] function would delegate to types.Import
+			// when it detects that the file was produced by Export.
 			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
 			return pkg, err
 
-		case 'u': // unified, from go1.20
+		case 'u':
+			// unified, produced by cmd/compile since go1.20
 			_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
 			return pkg, err
 
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
index 8f7afcb5..96db9daf 100644
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -79,7 +79,7 @@ type DriverResponse struct {
 
 // driver is the type for functions that query the build system for the
 // packages named by the patterns.
-type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
+type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
 
 // findExternalDriver returns the file path of a tool that supplies
 // the build system package structure, or "" if not found.
@@ -103,7 +103,7 @@ func findExternalDriver(cfg *Config) driver {
 			return nil
 		}
 	}
-	return func(cfg *Config, words ...string) (*DriverResponse, error) {
+	return func(cfg *Config, patterns []string) (*DriverResponse, error) {
 		req, err := json.Marshal(DriverRequest{
 			Mode:       cfg.Mode,
 			Env:        cfg.Env,
@@ -117,7 +117,7 @@ func findExternalDriver(cfg *Config) driver {
 
 		buf := new(bytes.Buffer)
 		stderr := new(bytes.Buffer)
-		cmd := exec.CommandContext(cfg.Context, tool, words...)
+		cmd := exec.CommandContext(cfg.Context, tool, patterns...)
 		cmd.Dir = cfg.Dir
 		// The cwd gets resolved to the real path. On Darwin, where
 		// /tmp is a symlink, this breaks anything that expects the
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 1a3a5b44..76f910ec 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -80,6 +80,12 @@ type golistState struct {
 	cfg *Config
 	ctx context.Context
 
+	runner *gocommand.Runner
+
+	// overlay is the JSON file that encodes the Config.Overlay
+	// mapping, used by 'go list -overlay=...'.
+	overlay string
+
 	envOnce    sync.Once
 	goEnvError error
 	goEnv      map[string]string
@@ -127,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string {
 // goListDriver uses the go list command to interpret the patterns and produce
 // the build system package structure.
 // See driver for more details.
-func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
+//
+// overlay is the JSON file that encodes the cfg.Overlay
+// mapping, used by 'go list -overlay=...'
+func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
 	// Make sure that any asynchronous go commands are killed when we return.
 	parentCtx := cfg.Context
 	if parentCtx == nil {
@@ -142,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error
 		cfg:        cfg,
 		ctx:        ctx,
 		vendorDirs: map[string]bool{},
+		overlay:    overlay,
+		runner:     runner,
 	}
 
 	// Fill in response.Sizes asynchronously if necessary.
-	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		errCh := make(chan error)
 		go func() {
-			compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner)
+			compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
 			response.dr.Compiler = compiler
 			response.dr.Arch = arch
 			errCh <- err
@@ -681,7 +692,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
 // getGoVersion returns the effective minor version of the go command.
 func (state *golistState) getGoVersion() (int, error) {
 	state.goVersionOnce.Do(func() {
-		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
+		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
 	})
 	return state.goVersion, state.goVersionError
 }
@@ -751,7 +762,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
 		}
 	}
 	addFields("Name", "ImportPath", "Error") // These fields are always needed
-	if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
 			"CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
 			"SwigFiles", "SwigCXXFiles", "SysoFiles")
@@ -759,7 +770,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
 			addFields("TestGoFiles", "XTestGoFiles")
 		}
 	}
-	if cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		// CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
 		// even when -compiled isn't passed in.
 		// TODO(#52435): Should we make the test ask for -compiled, or automatically
@@ -840,7 +851,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation {
 		Env:        cfg.Env,
 		Logf:       cfg.Logf,
 		WorkingDir: cfg.Dir,
-		Overlay:    cfg.goListOverlayFile,
+		Overlay:    state.overlay,
 	}
 }
 
@@ -851,11 +862,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
 	inv := state.cfgInvocation()
 	inv.Verb = verb
 	inv.Args = args
-	gocmdRunner := cfg.gocmdRunner
-	if gocmdRunner == nil {
-		gocmdRunner = &gocommand.Runner{}
-	}
-	stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv)
+
+	stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
 	if err != nil {
 		// Check for 'go' executable not being found.
 		if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
@@ -879,6 +887,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
 			return nil, friendlyErr
 		}
 
+		// Return an error if 'go list' failed due to missing tools in
+		// $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
+			return nil, friendlyErr
+		}
+
 		// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
 		// and should be suppressed by go list -e.
 		//
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index f227f1ba..2ecc6423 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -16,13 +16,13 @@ import (
 	"go/scanner"
 	"go/token"
 	"go/types"
-	"io"
 	"log"
 	"os"
 	"path/filepath"
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"golang.org/x/sync/errgroup"
@@ -31,7 +31,6 @@ import (
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/packagesinternal"
 	"golang.org/x/tools/internal/typesinternal"
-	"golang.org/x/tools/internal/versions"
 )
 
 // A LoadMode controls the amount of detail to return when loading.
@@ -56,7 +55,7 @@ const (
 	// NeedName adds Name and PkgPath.
 	NeedName LoadMode = 1 << iota
 
-	// NeedFiles adds GoFiles and OtherFiles.
+	// NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles
 	NeedFiles
 
 	// NeedCompiledGoFiles adds CompiledGoFiles.
@@ -78,7 +77,7 @@ const (
 	// NeedSyntax adds Syntax and Fset.
 	NeedSyntax
 
-	// NeedTypesInfo adds TypesInfo.
+	// NeedTypesInfo adds TypesInfo and Fset.
 	NeedTypesInfo
 
 	// NeedTypesSizes adds TypesSizes.
@@ -145,13 +144,7 @@ const (
 // A Config specifies details about how packages should be loaded.
 // The zero value is a valid configuration.
 //
-// Calls to Load do not modify this struct.
-//
-// TODO(adonovan): #67702: this is currently false: in fact,
-// calls to [Load] do not modify the public fields of this struct, but
-// may modify hidden fields, so concurrent calls to [Load] must not
-// use the same Config. But perhaps we should reestablish the
-// documented invariant.
+// Calls to [Load] do not modify this struct.
 type Config struct {
 	// Mode controls the level of information returned for each package.
 	Mode LoadMode
@@ -182,19 +175,10 @@ type Config struct {
 	//
 	Env []string
 
-	// gocmdRunner guards go command calls from concurrency errors.
-	gocmdRunner *gocommand.Runner
-
 	// BuildFlags is a list of command-line flags to be passed through to
 	// the build system's query tool.
 	BuildFlags []string
 
-	// modFile will be used for -modfile in go command invocations.
-	modFile string
-
-	// modFlag will be used for -modfile in go command invocations.
-	modFlag string
-
 	// Fset provides source position information for syntax trees and types.
 	// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
 	Fset *token.FileSet
@@ -241,9 +225,13 @@ type Config struct {
 	// drivers may vary in their level of support for overlays.
 	Overlay map[string][]byte
 
-	// goListOverlayFile is the JSON file that encodes the Overlay
-	// mapping, used by 'go list -overlay=...'
-	goListOverlayFile string
+	// -- Hidden configuration fields only for use in x/tools --
+
+	// modFile will be used for -modfile in go command invocations.
+	modFile string
+
+	// modFlag will be used for -modfile in go command invocations.
+	modFlag string
 }
 
 // Load loads and returns the Go packages named by the given patterns.
@@ -334,21 +322,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro
 		} else if !response.NotHandled {
 			return response, true, nil
 		}
-		// (fall through)
+		// not handled: fall through
 	}
 
 	// go list fallback
-	//
+
 	// Write overlays once, as there are many calls
 	// to 'go list' (one per chunk plus others too).
-	overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
+	overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
 	if err != nil {
 		return nil, false, err
 	}
 	defer cleanupOverlay()
-	cfg.goListOverlayFile = overlay
 
-	response, err := callDriverOnChunks(goListDriver, cfg, chunks)
+	var runner gocommand.Runner // (shared across many 'go list' calls)
+	driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
+		return goListDriver(cfg, &runner, overlayFile, patterns)
+	}
+	response, err := callDriverOnChunks(driver, cfg, chunks)
 	if err != nil {
 		return nil, false, err
 	}
@@ -386,16 +377,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) {
 
 func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) {
 	if len(chunks) == 0 {
-		return driver(cfg)
+		return driver(cfg, nil)
 	}
 	responses := make([]*DriverResponse, len(chunks))
 	errNotHandled := errors.New("driver returned NotHandled")
 	var g errgroup.Group
 	for i, chunk := range chunks {
-		i := i
-		chunk := chunk
 		g.Go(func() (err error) {
-			responses[i], err = driver(cfg, chunk...)
+			responses[i], err = driver(cfg, chunk)
 			if responses[i] != nil && responses[i].NotHandled {
 				err = errNotHandled
 			}
@@ -692,18 +681,19 @@ func (p *Package) String() string { return p.ID }
 // loaderPackage augments Package with state used during the loading phase
 type loaderPackage struct {
 	*Package
-	importErrors map[string]error // maps each bad import to its error
-	loadOnce     sync.Once
-	color        uint8 // for cycle detection
-	needsrc      bool  // load from source (Mode >= LoadTypes)
-	needtypes    bool  // type information is either requested or depended on
-	initial      bool  // package was matched by a pattern
-	goVersion    int   // minor version number of go command on PATH
+	importErrors    map[string]error // maps each bad import to its error
+	preds           []*loaderPackage // packages that import this one
+	unfinishedSuccs atomic.Int32     // number of direct imports not yet loaded
+	color           uint8            // for cycle detection
+	needsrc         bool             // load from source (Mode >= LoadTypes)
+	needtypes       bool             // type information is either requested or depended on
+	initial         bool             // package was matched by a pattern
+	goVersion       int              // minor version number of go command on PATH
 }
 
 // loader holds the working state of a single call to load.
 type loader struct {
-	pkgs map[string]*loaderPackage
+	pkgs map[string]*loaderPackage // keyed by Package.ID
 	Config
 	sizes        types.Sizes // non-nil if needed by mode
 	parseCache   map[string]*parseValue
@@ -749,9 +739,6 @@ func newLoader(cfg *Config) *loader {
 	if ld.Config.Env == nil {
 		ld.Config.Env = os.Environ()
 	}
-	if ld.Config.gocmdRunner == nil {
-		ld.Config.gocmdRunner = &gocommand.Runner{}
-	}
 	if ld.Context == nil {
 		ld.Context = context.Background()
 	}
@@ -765,7 +752,7 @@ func newLoader(cfg *Config) *loader {
 	ld.requestedMode = ld.Mode
 	ld.Mode = impliedLoadMode(ld.Mode)
 
-	if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
+	if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
 		if ld.Fset == nil {
 			ld.Fset = token.NewFileSet()
 		}
@@ -806,7 +793,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 		exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
 		// This package needs type information if the caller requested types and the package is
 		// either a root, or it's a non-root and the user requested dependencies ...
-		needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+		needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
 		// This package needs source if the call requested source (or types info, which implies source)
 		// and the package is either a root, or it's a non-root and the user requested dependencies...
 		// and the package is either a root, or it's a non-root and the user requested dependencies...
 		needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
@@ -831,9 +818,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 		}
 	}
 
-	if ld.Mode&NeedImports != 0 {
-		// Materialize the import graph.
-
+	// Materialize the import graph if it is needed (NeedImports),
+	// or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
+	var leaves []*loaderPackage // packages with no unfinished successors
+	if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
 		const (
 			white = 0 // new
 			grey  = 1 // in progress
@@ -852,63 +840,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 		// dependency on a package that does. These are the only packages
 		// for which we load source code.
 		var stack []*loaderPackage
-		var visit func(lpkg *loaderPackage) bool
-		visit = func(lpkg *loaderPackage) bool {
-			switch lpkg.color {
-			case black:
-				return lpkg.needsrc
-			case grey:
+		var visit func(from, lpkg *loaderPackage) bool
+		visit = func(from, lpkg *loaderPackage) bool {
+			if lpkg.color == grey {
 				panic("internal error: grey node")
 			}
-			lpkg.color = grey
-			stack = append(stack, lpkg) // push
-			stubs := lpkg.Imports       // the structure form has only stubs with the ID in the Imports
-			lpkg.Imports = make(map[string]*Package, len(stubs))
-			for importPath, ipkg := range stubs {
-				var importErr error
-				imp := ld.pkgs[ipkg.ID]
-				if imp == nil {
-					// (includes package "C" when DisableCgo)
-					importErr = fmt.Errorf("missing package: %q", ipkg.ID)
-				} else if imp.color == grey {
-					importErr = fmt.Errorf("import cycle: %s", stack)
+			if lpkg.color == white {
+				lpkg.color = grey
+				stack = append(stack, lpkg) // push
+				stubs := lpkg.Imports       // the structure form has only stubs with the ID in the Imports
+				lpkg.Imports = make(map[string]*Package, len(stubs))
+				for importPath, ipkg := range stubs {
+					var importErr error
+					imp := ld.pkgs[ipkg.ID]
+					if imp == nil {
+						// (includes package "C" when DisableCgo)
+						importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+					} else if imp.color == grey {
+						importErr = fmt.Errorf("import cycle: %s", stack)
+					}
+					if importErr != nil {
+						if lpkg.importErrors == nil {
+							lpkg.importErrors = make(map[string]error)
+						}
+						lpkg.importErrors[importPath] = importErr
+						continue
+					}
+
+					if visit(lpkg, imp) {
+						lpkg.needsrc = true
+					}
+					lpkg.Imports[importPath] = imp.Package
 				}
-				if importErr != nil {
-					if lpkg.importErrors == nil {
-						lpkg.importErrors = make(map[string]error)
+
+				// -- postorder --
+
+				// Complete type information is required for the
+				// immediate dependencies of each source package.
+				if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
+					for _, ipkg := range lpkg.Imports {
+						ld.pkgs[ipkg.ID].needtypes = true
 					}
-					lpkg.importErrors[importPath] = importErr
-					continue
 				}
 
-				if visit(imp) {
-					lpkg.needsrc = true
+				// NeedTypeSizes causes TypeSizes to be set even
+				// on packages for which types aren't needed.
+				if ld.Mode&NeedTypesSizes != 0 {
+					lpkg.TypesSizes = ld.sizes
 				}
-				lpkg.Imports[importPath] = imp.Package
-			}
 
-			// Complete type information is required for the
-			// immediate dependencies of each source package.
-			if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
-				for _, ipkg := range lpkg.Imports {
-					ld.pkgs[ipkg.ID].needtypes = true
+				// Add packages with no imports directly to the queue of leaves.
+				if len(lpkg.Imports) == 0 {
+					leaves = append(leaves, lpkg)
 				}
+
+				stack = stack[:len(stack)-1] // pop
+				lpkg.color = black
 			}
 
-			// NeedTypeSizes causes TypeSizes to be set even
-			// on packages for which types aren't needed.
-			if ld.Mode&NeedTypesSizes != 0 {
-				lpkg.TypesSizes = ld.sizes
+			// Add edge from predecessor.
+			if from != nil {
+				from.unfinishedSuccs.Add(+1) // incref
+				lpkg.preds = append(lpkg.preds, from)
 			}
-			stack = stack[:len(stack)-1] // pop
-			lpkg.color = black
 
 			return lpkg.needsrc
 		}
 
 		// For each initial package, create its import DAG.
 		for _, lpkg := range initial {
-			visit(lpkg)
+			visit(nil, lpkg)
 		}
 
 	} else {
@@ -921,16 +922,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 
 	// Load type data and syntax if needed, starting at
 	// the initial packages (roots of the import DAG).
-	if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
-		var wg sync.WaitGroup
-		for _, lpkg := range initial {
-			wg.Add(1)
-			go func(lpkg *loaderPackage) {
-				ld.loadRecursive(lpkg)
-				wg.Done()
-			}(lpkg)
+	if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+
+		// We avoid using g.SetLimit to limit concurrency as
+		// it makes g.Go stop accepting work, which prevents
+		// workers from enqueuing, and thus finishing, and thus
+		// allowing the group to make progress: deadlock.
+		//
+		// Instead we use the ioLimit and cpuLimit semaphores.
+		g, _ := errgroup.WithContext(ld.Context)
+
+		// enqueue adds a package to the type-checking queue.
+		// It must have no unfinished successors.
+		var enqueue func(*loaderPackage)
+		enqueue = func(lpkg *loaderPackage) {
+			g.Go(func() error {
+				// Parse and type-check.
+				ld.loadPackage(lpkg)
+
+				// Notify each waiting predecessor,
+				// and enqueue it when it becomes a leaf.
+				for _, pred := range lpkg.preds {
+					if pred.unfinishedSuccs.Add(-1) == 0 { // decref
+						enqueue(pred)
+					}
+				}
+
+				return nil
+			})
+		}
+
+		// Load leaves first, adding new packages
+		// to the queue as they become leaves.
+		for _, leaf := range leaves {
+			enqueue(leaf)
+		}
+
+		if err := g.Wait(); err != nil {
+			return nil, err // cancelled
 		}
-		wg.Wait()
 	}
 
 	// If the context is done, return its error and
@@ -977,7 +1007,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 		if ld.requestedMode&NeedSyntax == 0 {
 			ld.pkgs[i].Syntax = nil
 		}
-		if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 {
+		if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 {
 			ld.pkgs[i].Fset = nil
 		}
 		if ld.requestedMode&NeedTypesInfo == 0 {
@@ -994,31 +1024,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 	return result, nil
 }
 
-// loadRecursive loads the specified package and its dependencies,
-// recursively, in parallel, in topological order.
-// It is atomic and idempotent.
-// Precondition: ld.Mode&NeedTypes.
-func (ld *loader) loadRecursive(lpkg *loaderPackage) {
-	lpkg.loadOnce.Do(func() {
-		// Load the direct dependencies, in parallel.
-		var wg sync.WaitGroup
-		for _, ipkg := range lpkg.Imports {
-			imp := ld.pkgs[ipkg.ID]
-			wg.Add(1)
-			go func(imp *loaderPackage) {
-				ld.loadRecursive(imp)
-				wg.Done()
-			}(imp)
-		}
-		wg.Wait()
-		ld.loadPackage(lpkg)
-	})
-}
-
-// loadPackage loads the specified package.
+// loadPackage loads/parses/typechecks the specified package.
 // It must be called only once per Package,
 // after immediate dependencies are loaded.
-// Precondition: ld.Mode & NeedTypes.
+// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
 func (ld *loader) loadPackage(lpkg *loaderPackage) {
 	if lpkg.PkgPath == "unsafe" {
 		// Fill in the blanks to avoid surprises.
@@ -1054,6 +1063,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 	if !lpkg.needtypes && !lpkg.needsrc {
 		return
 	}
+
+	// TODO(adonovan): this condition looks wrong:
+	// I think it should be lpkg.needtypes && !lpkg.needsrc,
+	// so that NeedSyntax without NeedTypes can be satisfied by export data.
 	if !lpkg.needsrc {
 		if err := ld.loadFromExportData(lpkg); err != nil {
 			lpkg.Errors = append(lpkg.Errors, Error{
@@ -1159,7 +1172,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 	}
 
 	lpkg.Syntax = files
-	if ld.Config.Mode&NeedTypes == 0 {
+	if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 {
 		return
 	}
 
@@ -1170,16 +1183,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 		return
 	}
 
-	lpkg.TypesInfo = &types.Info{
-		Types:      make(map[ast.Expr]types.TypeAndValue),
-		Defs:       make(map[*ast.Ident]types.Object),
-		Uses:       make(map[*ast.Ident]types.Object),
-		Implicits:  make(map[ast.Node]types.Object),
-		Instances:  make(map[*ast.Ident]types.Instance),
-		Scopes:     make(map[ast.Node]*types.Scope),
-		Selections: make(map[*ast.SelectorExpr]*types.Selection),
+	// Populate TypesInfo only if needed, as it
+	// causes the type checker to work much harder.
+	if ld.Config.Mode&NeedTypesInfo != 0 {
+		lpkg.TypesInfo = &types.Info{
+			Types:        make(map[ast.Expr]types.TypeAndValue),
+			Defs:         make(map[*ast.Ident]types.Object),
+			Uses:         make(map[*ast.Ident]types.Object),
+			Implicits:    make(map[ast.Node]types.Object),
+			Instances:    make(map[*ast.Ident]types.Instance),
+			Scopes:       make(map[ast.Node]*types.Scope),
+			Selections:   make(map[*ast.SelectorExpr]*types.Selection),
+			FileVersions: make(map[*ast.File]string),
+		}
 	}
-	versions.InitFileVersions(lpkg.TypesInfo)
 	lpkg.TypesSizes = ld.sizes
 
 	importer := importerFunc(func(path string) (*types.Package, error) {
@@ -1232,6 +1249,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 		}
 	}
 
+	// Type-checking is CPU intensive.
+	cpuLimit <- unit{}            // acquire a token
+	defer func() { <-cpuLimit }() // release a token
+
 	typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
 	lpkg.importErrors = nil // no longer needed
 
@@ -1296,8 +1317,11 @@ type importerFunc func(path string) (*types.Package, error)
 func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
 
 // We use a counting semaphore to limit
-// the number of parallel I/O calls per process.
-var ioLimit = make(chan bool, 20)
+// the number of parallel I/O calls or CPU threads per process.
+var (
+	ioLimit  = make(chan unit, 20)
+	cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))
+)
 
 func (ld *loader) parseFile(filename string) (*ast.File, error) {
 	ld.parseCacheMu.Lock()
@@ -1314,20 +1338,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
 
 		var src []byte
 		for f, contents := range ld.Config.Overlay {
+			// TODO(adonovan): Inefficient for large overlays.
+			// Do an exact name-based map lookup
+			// (for nonexistent files) followed by a
+			// FileID-based map lookup (for existing ones).
 			if sameFile(f, filename) {
 				src = contents
+				break
 			}
 		}
 		var err error
 		if src == nil {
-			ioLimit <- true // wait
+			ioLimit <- unit{} // acquire a token
 			src, err = os.ReadFile(filename)
-			<-ioLimit // signal
+			<-ioLimit // release a token
 		}
 		if err != nil {
 			v.err = err
 		} else {
+			// Parsing is CPU intensive.
+			cpuLimit <- unit{} // acquire a token
 			v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+			<-cpuLimit // release a token
 		}
 
 		close(v.ready)
@@ -1342,18 +1374,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
 // Because files are scanned in parallel, the token.Pos
 // positions of the resulting ast.Files are not ordered.
 func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
-	var wg sync.WaitGroup
-	n := len(filenames)
-	parsed := make([]*ast.File, n)
-	errors := make([]error, n)
-	for i, file := range filenames {
-		wg.Add(1)
-		go func(i int, filename string) {
+	var (
+		n      = len(filenames)
+		parsed = make([]*ast.File, n)
+		errors = make([]error, n)
+	)
+	var g errgroup.Group
+	for i, filename := range filenames {
+		// This creates goroutines unnecessarily in the
+		// cache-hit case, but that case is uncommon.
+		g.Go(func() error {
 			parsed[i], errors[i] = ld.parseFile(filename)
-			wg.Done()
-		}(i, file)
+			return nil
+		})
 	}
-	wg.Wait()
+	g.Wait()
 
 	// Eliminate nils, preserving order.
 	var o int
@@ -1524,4 +1559,4 @@ func usesExportData(cfg *Config) bool {
 	return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
 }
 
-var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
+type unit struct{}
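Several of the loader changes above treat NeedTypesInfo as implying type checking and a shared Fset even when NeedTypes is absent, and the Config documentation once again promises that Load does not modify it. A small sketch of a Load call exercising that mode combination (the pattern is an assumed example):

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// NeedTypesInfo alone is now enough to get type-checked syntax
		// and a populated Fset; NeedTypes is no longer required for it.
		Mode: packages.NeedName | packages.NeedSyntax | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "golang.org/x/tools/go/packages") // example pattern
	if err != nil {
		panic(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.Syntax), p.TypesInfo != nil)
	}
}
```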
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index a70b727f..16ed3c17 100644
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -281,25 +281,25 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
 
 		T := o.Type()
 		if alias, ok := T.(*types.Alias); ok {
-			if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil {
+			if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
 				return Path(r), nil
 			}
-			if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil {
+			if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
 				return Path(r), nil
 			}
 
 		} else if tname.IsAlias() {
 			// legacy alias
-			if r := find(obj, T, path, nil); r != nil {
+			if r := find(obj, T, path); r != nil {
 				return Path(r), nil
 			}
 
 		} else if named, ok := T.(*types.Named); ok {
 			// defined (named) type
-			if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil {
+			if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
 				return Path(r), nil
 			}
-			if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil {
+			if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
 				return Path(r), nil
 			}
 		}
@@ -312,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
 		if _, ok := o.(*types.TypeName); !ok {
 			if o.Exported() {
 				// exported non-type (const, var, func)
-				if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
+				if r := find(obj, o.Type(), append(path, opType)); r != nil {
 					return Path(r), nil
 				}
 			}
@@ -332,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
 				if m == obj {
 					return Path(path2), nil // found declared method
 				}
-				if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
+				if r := find(obj, m.Type(), append(path2, opType)); r != nil {
 					return Path(r), nil
 				}
 			}
@@ -447,46 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
 //
 // The seen map is used to short circuit cycles through type parameters. If
 // nil, it will be allocated as necessary.
-func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
+//
+// The seenMethods map is used internally to short circuit cycles through
+// interface methods, such as occur in the following example:
+//
+//	type I interface { f() interface{I} }
+//
+// See golang/go#68046 for details.
+func find(obj types.Object, T types.Type, path []byte) []byte {
+	return (&finder{obj: obj}).find(T, path)
+}
+
+// finder closes over search state for a call to find.
+type finder struct {
+	obj             types.Object             // the sought object
+	seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
+	seenMethods     map[*types.Func]bool     // for cycle breaking through recursive interfaces
+}
+
+func (f *finder) find(T types.Type, path []byte) []byte {
 	switch T := T.(type) {
 	case *types.Alias:
-		return find(obj, types.Unalias(T), path, seen)
+		return f.find(types.Unalias(T), path)
 	case *types.Basic, *types.Named:
 		// Named types belonging to pkg were handled already,
 		// so T must belong to another package. No path.
 		return nil
 	case *types.Pointer:
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Slice:
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Array:
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Chan:
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Map:
-		if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
+		if r := f.find(T.Key(), append(path, opKey)); r != nil {
 			return r
 		}
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Signature:
-		if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil {
+		if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
 			return r
 		}
-		if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil {
+		if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
 			return r
 		}
-		if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
+		if r := f.find(T.Params(), append(path, opParams)); r != nil {
 			return r
 		}
-		return find(obj, T.Results(), append(path, opResults), seen)
+		return f.find(T.Results(), append(path, opResults))
 	case *types.Struct:
 		for i := 0; i < T.NumFields(); i++ {
 			fld := T.Field(i)
 			path2 := appendOpArg(path, opField, i)
-			if fld == obj {
+			if fld == f.obj {
 				return path2 // found field var
 			}
-			if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
+			if r := f.find(fld.Type(), append(path2, opType)); r != nil {
 				return r
 			}
 		}
@@ -495,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
 		for i := 0; i < T.Len(); i++ {
 			v := T.At(i)
 			path2 := appendOpArg(path, opAt, i)
-			if v == obj {
+			if v == f.obj {
 				return path2 // found param/result var
 			}
-			if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
+			if r := f.find(v.Type(), append(path2, opType)); r != nil {
 				return r
 			}
 		}
@@ -506,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
 	case *types.Interface:
 		for i := 0; i < T.NumMethods(); i++ {
 			m := T.Method(i)
+			if f.seenMethods[m] {
+				return nil
+			}
 			path2 := appendOpArg(path, opMethod, i)
-			if m == obj {
+			if m == f.obj {
 				return path2 // found interface method
 			}
-			if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
+			if f.seenMethods == nil {
+				f.seenMethods = make(map[*types.Func]bool)
+			}
+			f.seenMethods[m] = true
+			if r := f.find(m.Type(), append(path2, opType)); r != nil {
 				return r
 			}
 		}
 		return nil
 	case *types.TypeParam:
 		name := T.Obj()
-		if name == obj {
-			return append(path, opObj)
-		}
-		if seen[name] {
+		if f.seenTParamNames[name] {
 			return nil
 		}
-		if seen == nil {
-			seen = make(map[*types.TypeName]bool)
+		if name == f.obj {
+			return append(path, opObj)
 		}
-		seen[name] = true
-		if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
+		if f.seenTParamNames == nil {
+			f.seenTParamNames = make(map[*types.TypeName]bool)
+		}
+		f.seenTParamNames[name] = true
+		if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
 			return r
 		}
 		return nil
@@ -535,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
 	panic(T)
 }
 
-func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte {
+func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
+	return (&finder{obj: obj}).findTypeParam(list, path, op)
+}
+
+func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
 	for i := 0; i < list.Len(); i++ {
 		tparam := list.At(i)
 		path2 := appendOpArg(path, op, i)
-		if r := find(obj, tparam, path2, seen); r != nil {
+		if r := f.find(tparam, path2); r != nil {
 			return r
 		}
 	}
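The finder refactoring changes only the internals: an Encoder still maps a types.Object to a stable Path, and Object resolves the path back. A self-contained round-trip sketch for a struct field:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p

type T struct{ X int }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// Encode a path for the field T.X ...
	field := pkg.Scope().Lookup("T").Type().(*types.Named).Underlying().(*types.Struct).Field(0)
	var enc objectpath.Encoder
	path, err := enc.For(field)
	if err != nil {
		panic(err)
	}

	// ... and resolve it back to the same object.
	obj, err := objectpath.Object(pkg, path)
	if err != nil {
		panic(err)
	}
	fmt.Println(path, obj == field)
}
```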
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index 1e19fbed..7dfc31a3 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -246,6 +246,26 @@ import (
 
 // IExportShallow encodes "shallow" export data for the specified package.
 //
+// For types, we use "shallow" export data. Historically, the Go
+// compiler always produced a summary of the types for a given package
+// that included types from other packages that it indirectly
+// referenced: "deep" export data. This had the advantage that the
+// compiler (and analogous tools such as gopls) need only load one
+// file per direct import.  However, it meant that the files tended to
+// get larger based on the level of the package in the import
+// graph. For example, higher-level packages in the kubernetes module
+// have over 1MB of "deep" export data, even when they have almost no
+// content of their own, merely because they mention a major type that
+// references many others. In pathological cases the export data was
+// 300x larger than the source for a package due to this quadratic
+// growth.
+//
+// "Shallow" export data means that the serialized types describe only
+// a single package. If those types mention types from other packages,
+// the type checker may need to request additional packages beyond
+// just the direct imports. Type information for the entire transitive
+// closure of imports is provided (lazily) by the DAG.
+//
 // No promises are made about the encoding other than that it can be decoded by
 // the same version of IImportShallow. If you plan to save export data in the
 // file system, be sure to include a cryptographic digest of the executable in
@@ -268,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc)
 }
 
 // IImportShallow decodes "shallow" types.Package data encoded by
-// IExportShallow in the same executable. This function cannot import data from
-// cmd/compile or gcexportdata.Write.
+// [IExportShallow] in the same executable. This function cannot import data
+// from cmd/compile or gcexportdata.Write.
 //
 // The importer calls getPackages to obtain package symbols for all
 // packages mentioned in the export data, including the one being
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 21908a15..e260c0e8 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -558,6 +558,14 @@ type importReader struct {
 	prevColumn int64
 }
 
+// markBlack is redefined in iimport_go123.go, to work around golang/go#69912.
+//
+// If TypeNames are not marked black (in the sense of go/types cycle
+// detection), they may be mutated when dot-imported. Fix this by punching a
+// hole through the type, when compiling with Go 1.23. (The bug has been fixed
+// for 1.24, but the fix was not worth back-porting).
+var markBlack = func(name *types.TypeName) {}
+
 func (r *importReader) obj(name string) {
 	tag := r.byte()
 	pos := r.pos()
@@ -570,6 +578,7 @@ func (r *importReader) obj(name string) {
 		}
 		typ := r.typ()
 		obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
+		markBlack(obj) // workaround for golang/go#69912
 		r.declare(obj)
 
 	case constTag:
@@ -590,6 +599,9 @@ func (r *importReader) obj(name string) {
 		// declaration before recursing.
 		obj := types.NewTypeName(pos, r.currPkg, name, nil)
 		named := types.NewNamed(obj, nil, nil)
+
+		markBlack(obj) // workaround for golang/go#69912
+
 		// Declare obj before calling r.tparamList, so the new type name is recognized
 		// if used in the constraint of one of its own typeparams (see #48280).
 		r.declare(obj)
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
new file mode 100644
index 00000000..7586bfac
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
@@ -0,0 +1,53 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22 && !go1.24
+
+package gcimporter
+
+import (
+	"go/token"
+	"go/types"
+	"unsafe"
+)
+
+// TODO(rfindley): delete this workaround once go1.24 is assured.
+
+func init() {
+	// Update markBlack so that it correctly sets the color
+	// of imported TypeNames.
+	//
+	// See the doc comment for markBlack for details.
+
+	type color uint32
+	const (
+		white color = iota
+		black
+		grey
+	)
+	type object struct {
+		_      *types.Scope
+		_      token.Pos
+		_      *types.Package
+		_      string
+		_      types.Type
+		_      uint32
+		color_ color
+		_      token.Pos
+	}
+	type typeName struct {
+		object
+	}
+
+	// If the size of types.TypeName changes, this will fail to compile.
+	const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{}))
+	var _ [-delta * delta]int
+
+	markBlack = func(obj *types.TypeName) {
+		type uP = unsafe.Pointer
+		var ptr *typeName
+		*(*uP)(uP(&ptr)) = uP(obj)
+		ptr.color_ = black
+	}
+}
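The new file mirrors the memory layout of types.TypeName with package unsafe and guards the cast with a compile-time size check: the array length -delta * delta is non-negative only when delta is zero, so any size drift breaks the build instead of corrupting memory at run time. A tiny stand-alone sketch of that idiom with two hypothetical types:

```go
package main

import (
	"fmt"
	"unsafe"
)

// original stands in for a type owned by another package; mirror is a local
// copy whose layout must match it exactly (both types are hypothetical).
type original struct {
	a int64
	b uint32
}

type mirror struct {
	a int64
	b uint32
}

// delta is zero only when both types have the same size; a negative array
// length is a compile error, so any size drift fails the build.
const delta = int64(unsafe.Sizeof(mirror{})) - int64(unsafe.Sizeof(original{}))

var _ [-delta * delta]int

func main() {
	o := original{a: 1, b: 2}
	m := (*mirror)(unsafe.Pointer(&o)) // same layout, so the fields line up
	fmt.Println(m.a, m.b)
}
```

A size check alone cannot detect reordered fields of equal size, which is one reason the workaround is scoped to go1.22 and go1.23 builds and slated for deletion once go1.24 is assured.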
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index c1510817..5ae57697 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -27,7 +27,6 @@ import (
 	"unicode"
 	"unicode/utf8"
 
-	"golang.org/x/sync/errgroup"
 	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/gocommand"
@@ -91,18 +90,6 @@ type ImportFix struct {
 	Relevance float64 // see pkg
 }
 
-// An ImportInfo represents a single import statement.
-type ImportInfo struct {
-	ImportPath string // import path, e.g. "crypto/rand".
-	Name       string // import name, e.g. "crand", or "" if none.
-}
-
-// A packageInfo represents what's known about a package.
-type packageInfo struct {
-	name    string          // real package name, if known.
-	exports map[string]bool // known exports.
-}
-
 // parseOtherFiles parses all the Go files in srcDir except filename, including
 // test files if filename looks like a test.
 //
@@ -162,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) {
 
 // collectReferences builds a map of selector expressions, from
 // left hand side (X) to a set of right hand sides (Sel).
-func collectReferences(f *ast.File) references {
-	refs := references{}
+func collectReferences(f *ast.File) References {
+	refs := References{}
 
 	var visitor visitFn
 	visitor = func(node ast.Node) ast.Visitor {
@@ -233,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
 
 		allFound := true
 		for right := range syms {
-			if !pkgInfo.exports[right] {
+			if !pkgInfo.Exports[right] {
 				allFound = false
 				break
 			}
@@ -246,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
 	return nil
 }
 
-// references is set of references found in a Go file. The first map key is the
-// left hand side of a selector expression, the second key is the right hand
-// side, and the value should always be true.
-type references map[string]map[string]bool
-
 // A pass contains all the inputs and state necessary to fix a file's imports.
 // It can be modified in some ways during use; see comments below.
 type pass struct {
@@ -258,27 +240,29 @@ type pass struct {
 	fset                 *token.FileSet // fset used to parse f and its siblings.
 	f                    *ast.File      // the file being fixed.
 	srcDir               string         // the directory containing f.
-	env                  *ProcessEnv    // the environment to use for go commands, etc.
-	loadRealPackageNames bool           // if true, load package names from disk rather than guessing them.
-	otherFiles           []*ast.File    // sibling files.
+	logf                 func(string, ...any)
+	source               Source      // the source of candidate imports and package names.
+	loadRealPackageNames bool        // if true, load package names from disk rather than guessing them.
+	otherFiles           []*ast.File // sibling files.
+	goroot               string
 
 	// Intermediate state, generated by load.
 	existingImports map[string][]*ImportInfo
-	allRefs         references
-	missingRefs     references
+	allRefs         References
+	missingRefs     References
 
 	// Inputs to fix. These can be augmented between successive fix calls.
 	lastTry       bool                    // indicates that this is the last call and fix should clean up as best it can.
 	candidates    []*ImportInfo           // candidate imports in priority order.
-	knownPackages map[string]*packageInfo // information about all known packages.
+	knownPackages map[string]*PackageInfo // information about all known packages.
 }
 
 // loadPackageNames saves the package names for everything referenced by imports.
-func (p *pass) loadPackageNames(imports []*ImportInfo) error {
-	if p.env.Logf != nil {
-		p.env.Logf("loading package names for %v packages", len(imports))
+func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error {
+	if p.logf != nil {
+		p.logf("loading package names for %v packages", len(imports))
 		defer func() {
-			p.env.Logf("done loading package names for %v packages", len(imports))
+			p.logf("done loading package names for %v packages", len(imports))
 		}()
 	}
 	var unknown []string
@@ -289,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
 		unknown = append(unknown, imp.ImportPath)
 	}
 
-	resolver, err := p.env.GetResolver()
-	if err != nil {
-		return err
-	}
-
-	names, err := resolver.loadPackageNames(unknown, p.srcDir)
+	names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown)
 	if err != nil {
 		return err
 	}
 
+	// TODO(rfindley): revisit this. Why do we need to store known packages with
+	// no exports? The inconsistent data is confusing.
 	for path, name := range names {
-		p.knownPackages[path] = &packageInfo{
-			name:    name,
-			exports: map[string]bool{},
+		p.knownPackages[path] = &PackageInfo{
+			Name:    name,
+			Exports: map[string]bool{},
 		}
 	}
 	return nil
@@ -330,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
 		return imp.Name
 	}
 	known := p.knownPackages[imp.ImportPath]
-	if known != nil && known.name != "" {
-		return withoutVersion(known.name)
+	if known != nil && known.Name != "" {
+		return withoutVersion(known.Name)
 	}
 	return ImportPathToAssumedName(imp.ImportPath)
 }
@@ -339,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
 // load reads in everything necessary to run a pass, and reports whether the
 // file already has all the imports it needs. It fills in p.missingRefs with the
 // file's missing symbols, if any, or removes unused imports if not.
-func (p *pass) load() ([]*ImportFix, bool) {
-	p.knownPackages = map[string]*packageInfo{}
-	p.missingRefs = references{}
+func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) {
+	p.knownPackages = map[string]*PackageInfo{}
+	p.missingRefs = References{}
 	p.existingImports = map[string][]*ImportInfo{}
 
 	// Load basic information about the file in question.
@@ -364,9 +345,11 @@ func (p *pass) load() ([]*ImportFix, bool) {
 	// f's imports by the identifier they introduce.
 	imports := collectImports(p.f)
 	if p.loadRealPackageNames {
-		err := p.loadPackageNames(append(imports, p.candidates...))
+		err := p.loadPackageNames(ctx, append(imports, p.candidates...))
 		if err != nil {
-			p.env.logf("loading package names: %v", err)
+			if p.logf != nil {
+				p.logf("loading package names: %v", err)
+			}
 			return nil, false
 		}
 	}
@@ -535,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() {
 					// We have the stdlib in memory; no need to guess.
 					rights = symbolNameSet(m)
 				}
-				p.addCandidate(imp, &packageInfo{
+				// TODO(rfindley): we should set package name here, for consistency.
+				p.addCandidate(imp, &PackageInfo{
 					// no name; we already know it.
-					exports: rights,
+					Exports: rights,
 				})
 			}
 		}
@@ -546,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() {
 
 // addCandidate adds a candidate import to p, and merges in the information
 // in pkg.
-func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
+func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) {
 	p.candidates = append(p.candidates, imp)
 	if existing, ok := p.knownPackages[imp.ImportPath]; ok {
-		if existing.name == "" {
-			existing.name = pkg.name
+		if existing.Name == "" {
+			existing.Name = pkg.Name
 		}
-		for export := range pkg.exports {
-			existing.exports[export] = true
+		for export := range pkg.Exports {
+			existing.Exports[export] = true
 		}
 	} else {
 		p.knownPackages[imp.ImportPath] = pkg
@@ -581,19 +565,42 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
 // getFixes gets the import fixes that need to be made to f in order to fix the imports.
 // It does not modify the ast.
 func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
+	source, err := NewProcessEnvSource(env, filename, f.Name.Name)
+	if err != nil {
+		return nil, err
+	}
+	goEnv, err := env.goEnv()
+	if err != nil {
+		return nil, err
+	}
+	return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source)
+}
+
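+// getFixesWithSource computes the import fixes for f, using the provided
+// Source to resolve package names and missing references.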
+func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) {
+	// This logic is defensively duplicated from getFixes.
 	abs, err := filepath.Abs(filename)
 	if err != nil {
 		return nil, err
 	}
 	srcDir := filepath.Dir(abs)
-	env.logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
+
+	if logf != nil {
+		logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir)
+	}
 
 	// First pass: looking only at f, and using the naive algorithm to
 	// derive package names from import paths, see if the file is already
 	// complete. We can't add any imports yet, because we don't know
 	// if missing references are actually package vars.
-	p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
-	if fixes, done := p.load(); done {
+	p := &pass{
+		fset:   fset,
+		f:      f,
+		srcDir: srcDir,
+		logf:   logf,
+		goroot: goroot,
+		source: source,
+	}
+	if fixes, done := p.load(ctx); done {
 		return fixes, nil
 	}
 
@@ -605,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
 	// Second pass: add information from other files in the same package,
 	// like their package vars and imports.
 	p.otherFiles = otherFiles
-	if fixes, done := p.load(); done {
+	if fixes, done := p.load(ctx); done {
 		return fixes, nil
 	}
 
@@ -618,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
 
 	// Third pass: get real package names where we had previously used
 	// the naive algorithm.
-	p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
+	p = &pass{
+		fset:   fset,
+		f:      f,
+		srcDir: srcDir,
+		logf:   logf,
+		goroot: goroot,
+		source: p.source, // safe to reuse, as it's just a wrapper around env
+	}
 	p.loadRealPackageNames = true
 	p.otherFiles = otherFiles
-	if fixes, done := p.load(); done {
+	if fixes, done := p.load(ctx); done {
 		return fixes, nil
 	}
 
@@ -835,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
 			return true
 		},
 		dirFound: func(pkg *pkg) bool {
-			return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
+			return pkgIsCandidate(filename, References{searchPkg: nil}, pkg)
 		},
 		packageNameLoaded: func(pkg *pkg) bool {
 			return pkg.packageName == searchPkg
@@ -1086,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
 	return e.GocmdRunner.Run(ctx, inv)
 }
 
-func addStdlibCandidates(pass *pass, refs references) error {
-	goenv, err := pass.env.goEnv()
-	if err != nil {
-		return err
-	}
+func addStdlibCandidates(pass *pass, refs References) error {
 	localbase := func(nm string) string {
 		ans := path.Base(nm)
 		if ans[0] == 'v' {
@@ -1105,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error {
 	}
 	add := func(pkg string) {
 		// Prevent self-imports.
-		if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
+		if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir {
 			return
 		}
 		exports := symbolNameSet(stdlib.PackageSymbols[pkg])
 		pass.addCandidate(
 			&ImportInfo{ImportPath: pkg},
-			&packageInfo{name: localbase(pkg), exports: exports})
+			&PackageInfo{Name: localbase(pkg), Exports: exports})
 	}
 	for left := range refs {
 		if left == "rand" {
@@ -1175,91 +1185,14 @@ type scanCallback struct {
 	exportsLoaded func(pkg *pkg, exports []stdlib.Symbol)
 }
 
-func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
+func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error {
 	ctx, done := event.Start(ctx, "imports.addExternalCandidates")
 	defer done()
 
-	var mu sync.Mutex
-	found := make(map[string][]pkgDistance)
-	callback := &scanCallback{
-		rootFound: func(gopathwalk.Root) bool {
-			return true // We want everything.
-		},
-		dirFound: func(pkg *pkg) bool {
-			return pkgIsCandidate(filename, refs, pkg)
-		},
-		packageNameLoaded: func(pkg *pkg) bool {
-			if _, want := refs[pkg.packageName]; !want {
-				return false
-			}
-			if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
-				// The candidate is in the same directory and has the
-				// same package name. Don't try to import ourselves.
-				return false
-			}
-			if !canUse(filename, pkg.dir) {
-				return false
-			}
-			mu.Lock()
-			defer mu.Unlock()
-			found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
-			return false // We'll do our own loading after we sort.
-		},
-	}
-	resolver, err := pass.env.GetResolver()
+	results, err := pass.source.ResolveReferences(ctx, filename, refs)
 	if err != nil {
 		return err
 	}
-	if err = resolver.scan(ctx, callback); err != nil {
-		return err
-	}
-
-	// Search for imports matching potential package references.
-	type result struct {
-		imp *ImportInfo
-		pkg *packageInfo
-	}
-	results := make([]*result, len(refs))
-
-	g, ctx := errgroup.WithContext(ctx)
-
-	searcher := symbolSearcher{
-		logf:        pass.env.logf,
-		srcDir:      pass.srcDir,
-		xtest:       strings.HasSuffix(pass.f.Name.Name, "_test"),
-		loadExports: resolver.loadExports,
-	}
-
-	i := 0
-	for pkgName, symbols := range refs {
-		index := i // claim an index in results
-		i++
-		pkgName := pkgName
-		symbols := symbols
-
-		g.Go(func() error {
-			found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
-			if err != nil {
-				return err
-			}
-			if found == nil {
-				return nil // No matching package.
-			}
-
-			imp := &ImportInfo{
-				ImportPath: found.importPathShort,
-			}
-			pkg := &packageInfo{
-				name:    pkgName,
-				exports: symbols,
-			}
-			results[index] = &result{imp, pkg}
-			return nil
-		})
-	}
-	if err := g.Wait(); err != nil {
-		return err
-	}
 
 	for _, result := range results {
 		if result == nil {
@@ -1267,7 +1200,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
 		}
 		// Don't offer completions that would shadow predeclared
 		// names, such as github.com/coreos/etcd/error.
-		if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
+		if types.Universe.Lookup(result.Package.Name) != nil { // predeclared
 			// Ideally we would skip this candidate only
 			// if the predeclared name is actually
 			// referenced by the file, but that's a lot
@@ -1276,7 +1209,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
 			// user before long.
 			continue
 		}
-		pass.addCandidate(result.imp, result.pkg)
+		pass.addCandidate(result.Import, result.Package)
 	}
 	return nil
 }
@@ -1801,7 +1734,7 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m
 // filename is the file being formatted.
 // pkgIdent is the package being searched for, like "client" (if
 // searching for "client.New")
-func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
+func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
 	// Check "internal" and "vendor" visibility:
 	if !canUse(filename, pkg.dir) {
 		return false
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index ff6b59a5..2215a128 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -47,7 +47,14 @@ type Options struct {
 // Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
 func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
 	fileSet := token.NewFileSet()
-	file, adjust, err := parse(fileSet, filename, src, opt)
+	var parserMode parser.Mode
+	if opt.Comments {
+		parserMode |= parser.ParseComments
+	}
+	if opt.AllErrors {
+		parserMode |= parser.AllErrors
+	}
+	file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment)
 	if err != nil {
 		return nil, err
 	}
@@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
 //
 // Note that filename's directory influences which imports can be chosen,
 // so it is important that filename be accurate.
-func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
+func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) {
 	ctx, done := event.Start(ctx, "imports.FixImports")
 	defer done()
 
 	fileSet := token.NewFileSet()
-	file, _, err := parse(fileSet, filename, src, opt)
+	// TODO(rfindley): these default values for ParseComments and AllErrors were
+	// extracted from gopls, but are they even needed?
+	file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true)
 	if err != nil {
 		return nil, err
 	}
 
-	return getFixes(ctx, fileSet, file, filename, opt.Env)
+	return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source)
 }
 
 // ApplyFixes applies all of the fixes to the file and formats it. extraMode
@@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
 // formatted file, and returns the postpocessed result.
 func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
 	mergeImports(file)
-	sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
+	sortImports(opt.LocalPrefix, fset.File(file.FileStart), file)
 	var spacesBefore []string // import paths we need spaces before
 	for _, impSection := range astutil.Imports(fset, file) {
 		// Within each block of contiguous imports, see if any
@@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori
 
 // parse parses src, which was read from filename,
 // as a Go source file or statement list.
-func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
-	var parserMode parser.Mode // legacy ast.Object resolution is required here
-	if opt.Comments {
-		parserMode |= parser.ParseComments
-	}
-	if opt.AllErrors {
-		parserMode |= parser.AllErrors
+func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) {
+	if parserMode&parser.SkipObjectResolution != 0 {
+		panic("legacy ast.Object resolution is required")
 	}
 
 	// Try as whole source file.
@@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
 	// If the error is that the source file didn't begin with a
 	// package line and we accept fragmented input, fall through to
 	// try as a source fragment.  Stop and return on any other error.
-	if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
+	if !fragment || !strings.Contains(err.Error(), "expected 'package'") {
 		return nil, nil, err
 	}
 
diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go
new file mode 100644
index 00000000..5d2aeeeb
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/imports/source.go
@@ -0,0 +1,63 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "context"
+
+// These types document the APIs below.
+//
+// TODO(rfindley): consider making these defined types rather than aliases.
+type (
+	ImportPath  = string
+	PackageName = string
+	Symbol      = string
+
+	// References is the set of references found in a Go file. The first map key is the
+	// left hand side of a selector expression, the second key is the right hand
+	// side, and the value should always be true.
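+	//
+	// For example, a file that refers to fmt.Println and fmt.Sprintf yields
+	// References{"fmt": {"Println": true, "Sprintf": true}}.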
+	References = map[PackageName]map[Symbol]bool
+)
+
+// A Result satisfies a missing import.
+//
+// The Import field describes the missing import spec, and the Package field
+// summarizes the package exports.
+type Result struct {
+	Import  *ImportInfo
+	Package *PackageInfo
+}
+
+// An ImportInfo represents a single import statement.
+type ImportInfo struct {
+	ImportPath string // import path, e.g. "crypto/rand".
+	Name       string // import name, e.g. "crand", or "" if none.
+}
+
+// A PackageInfo represents what's known about a package.
+type PackageInfo struct {
+	Name    string          // package name in the package declaration, if known
+	Exports map[string]bool // set of names of known package-level symbols
+}
+
+// A Source provides imports to satisfy unresolved references in the file being
+// fixed.
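+//
+// One implementation is [ProcessEnvSource], which is backed by a [ProcessEnv].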
+type Source interface {
+	// LoadPackageNames queries PackageName information for the requested import
+	// paths, when operating from the provided srcDir.
+	//
+	// TODO(rfindley): try to refactor to remove this operation.
+	LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error)
+
+	// ResolveReferences asks the Source for the best package name to satisfy
+	// each of the missing references, in the context of fixing the given
+	// filename.
+	//
+	// Returns a map from package name to a [Result] for that package name that
+	// provides the required symbols. Keys may be omitted in the map if no
+	// candidates satisfy all missing references for that package name. It is up
+	// to each data source to select the best result for each entry in the
+	// missing map.
+	ResolveReferences(ctx context.Context, filename string, missing References) (map[PackageName]*Result, error)
+}
diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go
new file mode 100644
index 00000000..ff9555d2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/imports/source_env.go
@@ -0,0 +1,125 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+	"context"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"golang.org/x/sync/errgroup"
+	"golang.org/x/tools/internal/gopathwalk"
+)
+
+// ProcessEnvSource implements the [Source] interface using the legacy
+// [ProcessEnv] abstraction.
+type ProcessEnvSource struct {
+	env      *ProcessEnv
+	srcDir   string
+	filename string
+	pkgName  string
+}
+
+// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given
+// env, to be used for fixing imports in the file with name filename in package
+// named pkgName.
+func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) {
+	abs, err := filepath.Abs(filename)
+	if err != nil {
+		return nil, err
+	}
+	srcDir := filepath.Dir(abs)
+	return &ProcessEnvSource{
+		env:      env,
+		srcDir:   srcDir,
+		filename: filename,
+		pkgName:  pkgName,
+	}, nil
+}
+
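+// LoadPackageNames implements [Source] by delegating to the resolver of the
+// wrapped [ProcessEnv].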
+func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) {
+	r, err := s.env.GetResolver()
+	if err != nil {
+		return nil, err
+	}
+	return r.loadPackageNames(unknown, srcDir)
+}
+
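+// ResolveReferences implements [Source] by scanning for candidate packages
+// whose name matches a missing reference and then searching their exports,
+// concurrently, for the required symbols.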
+func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) (map[string]*Result, error) {
+	var mu sync.Mutex
+	found := make(map[string][]pkgDistance)
+	callback := &scanCallback{
+		rootFound: func(gopathwalk.Root) bool {
+			return true // We want everything.
+		},
+		dirFound: func(pkg *pkg) bool {
+			return pkgIsCandidate(filename, refs, pkg)
+		},
+		packageNameLoaded: func(pkg *pkg) bool {
+			if _, want := refs[pkg.packageName]; !want {
+				return false
+			}
+			if pkg.dir == s.srcDir && s.pkgName == pkg.packageName {
+				// The candidate is in the same directory and has the
+				// same package name. Don't try to import ourselves.
+				return false
+			}
+			if !canUse(filename, pkg.dir) {
+				return false
+			}
+			mu.Lock()
+			defer mu.Unlock()
+			found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)})
+			return false // We'll do our own loading after we sort.
+		},
+	}
+	resolver, err := s.env.GetResolver()
+	if err != nil {
+		return nil, err
+	}
+	if err := resolver.scan(ctx, callback); err != nil {
+		return nil, err
+	}
+
+	g, ctx := errgroup.WithContext(ctx)
+
+	searcher := symbolSearcher{
+		logf:        s.env.logf,
+		srcDir:      s.srcDir,
+		xtest:       strings.HasSuffix(s.pkgName, "_test"),
+		loadExports: resolver.loadExports,
+	}
+
+	var resultMu sync.Mutex
+	results := make(map[string]*Result, len(refs))
+	for pkgName, symbols := range refs {
+		g.Go(func() error {
+			found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
+			if err != nil {
+				return err
+			}
+			if found == nil {
+				return nil // No matching package.
+			}
+
+			imp := &ImportInfo{
+				ImportPath: found.importPathShort,
+			}
+			pkg := &PackageInfo{
+				Name:    pkgName,
+				Exports: symbols,
+			}
+			resultMu.Lock()
+			results[pkgName] = &Result{Import: imp, Package: pkg}
+			resultMu.Unlock()
+			return nil
+		})
+	}
+	if err := g.Wait(); err != nil {
+		return nil, err
+	}
+	return results, nil
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
index 35810826..0ade5c29 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/free.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
@@ -6,6 +6,8 @@ package typeparams
 
 import (
 	"go/types"
+
+	"golang.org/x/tools/internal/aliases"
 )
 
 // Free is a memoization of the set of free type parameters within a
@@ -36,6 +38,18 @@ func (w *Free) Has(typ types.Type) (res bool) {
 		break
 
 	case *types.Alias:
+		if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
+			return true // This is an uninstantiated Alias.
+		}
+		// The expansion of an alias can have free type parameters,
+		// whether or not the alias itself has type parameters:
+		//
+		//   func _[K comparable]() {
+		//     type Set      = map[K]bool // free(Set)      = {K}
+		//     type MapTo[V] = map[K]V    // free(MapTo[foo]) = {K}
+		//   }
+		//
+		// So, we must Unalias.
 		return w.Has(types.Unalias(t))
 
 	case *types.Array:
@@ -96,9 +110,8 @@ func (w *Free) Has(typ types.Type) (res bool) {
 
 	case *types.Named:
 		args := t.TypeArgs()
-		// TODO(taking): this does not match go/types/infer.go. Check with rfindley.
 		if params := t.TypeParams(); params.Len() > args.Len() {
-			return true
+			return true // this is an uninstantiated named type.
 		}
 		for i, n := 0, args.Len(); i < n; i++ {
 			if w.Has(args.At(i)) {
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index 83923286..df3ea521 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -11,6 +11,8 @@ import (
 	"go/types"
 	"reflect"
 	"unsafe"
+
+	"golang.org/x/tools/internal/aliases"
 )
 
 func SetUsesCgo(conf *types.Config) bool {
@@ -63,3 +65,57 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
 		return other.Name()
 	}
 }
+
+// A NamedOrAlias is a [types.Type] that is named (as
+// defined by the spec) and capable of bearing type parameters: it
+// abstracts aliases ([types.Alias]) and defined types
+// ([types.Named]).
+//
+// Every type declared by an explicit "type" declaration is a
+// NamedOrAlias. (Built-in type symbols may additionally
+// have type [types.Basic], which is not a NamedOrAlias,
+// though the spec regards them as "named".)
+//
+// NamedOrAlias cannot expose the Origin method, because
+// [types.Alias.Origin] and [types.Named.Origin] have different
+// (covariant) result types; use [Origin] instead.
+type NamedOrAlias interface {
+	types.Type
+	Obj() *types.TypeName
+}
+
+// TypeParams is a light shim around t.TypeParams().
+// (go/types.Alias).TypeParams requires >= 1.23.
+func TypeParams(t NamedOrAlias) *types.TypeParamList {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.TypeParams(t)
+	case *types.Named:
+		return t.TypeParams()
+	}
+	return nil
+}
+
+// TypeArgs is a light shim around t.TypeArgs().
+// (go/types.Alias).TypeArgs requires >= 1.23.
+func TypeArgs(t NamedOrAlias) *types.TypeList {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.TypeArgs(t)
+	case *types.Named:
+		return t.TypeArgs()
+	}
+	return nil
+}
+
+// Origin returns the generic type of the Named or Alias type t if it
+// is instantiated, otherwise it returns t.
+func Origin(t NamedOrAlias) NamedOrAlias {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.Origin(t)
+	case *types.Named:
+		return t.Origin()
+	}
+	return t
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
index f0bb0d15..0fc10ce4 100644
--- a/vendor/golang.org/x/tools/internal/versions/types.go
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -31,8 +31,3 @@ func FileVersion(info *types.Info, file *ast.File) string {
 	// This would act as a max version on what a tool can support.
 	return Future
 }
-
-// InitFileVersions initializes info to record Go versions for Go files.
-func InitFileVersions(info *types.Info) {
-	info.FileVersions = make(map[*ast.File]string)
-}
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 0854d298..d9bfa6e1 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR
 organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
 and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
 
-If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
+If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
 
 ## Legal requirements
 
@@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly.
   is a great place to start. These issues are well-documented and usually can be
   resolved with a single pull request.
 
-- If you are adding a new file, make sure it has the copyright message template 
-  at the top as a comment. You can copy over the message from an existing file 
+- If you are adding a new file, make sure it has the copyright message template
+  at the top as a comment. You can copy over the message from an existing file
   and update the year.
 
 - The grpc package should only depend on standard Go packages and a small number
@@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly.
   proposal](https://github.com/grpc/proposal).
 
 - Provide a good **PR description** as a record of **what** change is being made
-  and **why** it was made. Link to a github issue if it exists.
+  and **why** it was made. Link to a GitHub issue if it exists.
 
-- If you want to fix formatting or style, consider whether your changes are an 
-  obvious improvement or might be considered a personal preference. If a style 
-  change is based on preference, it likely will not be accepted. If it corrects 
-  widely agreed-upon anti-patterns, then please do create a PR and explain the 
+- If you want to fix formatting or style, consider whether your changes are an
+  obvious improvement or might be considered a personal preference. If a style
+  change is based on preference, it likely will not be accepted. If it corrects
+  widely agreed-upon anti-patterns, then please do create a PR and explain the
   benefits of the change.
 
 - Unless your PR is trivial, you should expect there will be reviewer comments
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index b181f386..3a2092f1 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -130,7 +130,7 @@ type SubConn interface {
 	// UpdateAddresses updates the addresses used in this SubConn.
 	// gRPC checks if currently-connected address is still in the new list.
 	// If it's in the list, the connection will be kept.
-	// If it's not in the list, the connection will gracefully closed, and
+	// If it's not in the list, the connection will gracefully close, and
 	// a new connection will be created.
 	//
 	// This will trigger a state transition for the SubConn.
@@ -142,8 +142,11 @@ type SubConn interface {
 	Connect()
 	// GetOrBuildProducer returns a reference to the existing Producer for this
 	// ProducerBuilder in this SubConn, or, if one does not currently exist,
-	// creates a new one and returns it.  Returns a close function which must
-	// be called when the Producer is no longer needed.
+	// creates a new one and returns it.  Returns a close function which may be
+	// called when the Producer is no longer needed.  Otherwise the producer
+	// will automatically be closed upon connection loss or subchannel close.
+	// Should only be called on a SubConn in state Ready.  Otherwise the
+	// producer will be unable to create streams.
 	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
 	// Shutdown shuts down the SubConn gracefully.  Any started RPCs will be
 	// allowed to complete.  No future calls should be made on the SubConn.
@@ -452,8 +455,10 @@ type ProducerBuilder interface {
 	// Build creates a Producer.  The first parameter is always a
 	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
 	// associated SubConn), but is declared as `any` to avoid a dependency
-	// cycle.  Should also return a close function that will be called when all
-	// references to the Producer have been given up.
+	// cycle.  Build also returns a close function that will be called when all
+	// references to the Producer have been given up for a SubConn, or when a
+	// connectivity state change occurs on the SubConn.  The close function
+	// should always block until all asynchronous cleanup work is completed.
 	Build(grpcClientConnInterface any) (p Producer, close func())
 }
 
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 2b87bd79..d5ed172a 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
 		}
 	}
 	// If resolver state contains no addresses, return an error so ClientConn
-	// will trigger re-resolve. Also records this as an resolver error, so when
+	// will trigger re-resolve. Also records this as a resolver error, so when
 	// the overall state turns transient failure, the error message will have
 	// the zero address information.
 	if len(s.ResolverState.Addresses) == 0 {
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
new file mode 100644
index 00000000..c5197894
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the pickfirst package.
+package internal
+
+import "math/rand"
+
+// RandShuffle pseudo-randomizes the order of addresses.
+var RandShuffle = rand.Shuffle
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index 4d69b405..e069346a 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -26,18 +26,23 @@ import (
 	"math/rand"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst/internal"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/envconfig"
 	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/pretty"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/serviceconfig"
+
+	_ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
 )
 
 func init() {
+	if envconfig.NewPickFirstEnabled {
+		return
+	}
 	balancer.Register(pickfirstBuilder{})
-	internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
 }
 
 var logger = grpclog.Component("pick-first-lb")
@@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) {
 	})
 }
 
+// Shuffler is an interface for shuffling an address list.
 type Shuffler interface {
 	ShuffleAddressListForTesting(n int, swap func(i, j int))
 }
 
+// ShuffleAddressListForTesting pseudo-randomizes the order of addresses.  n
+// is the number of elements.  swap swaps the elements with indexes i and j.
 func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
 
 func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
@@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
 		// within each endpoint. - A61
 		if cfg.ShuffleAddressList {
 			endpoints = append([]resolver.Endpoint{}, endpoints...)
-			internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
 		}
 
 		// "Flatten the list by concatenating the ordered list of addresses for each
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
new file mode 100644
index 00000000..985b6edc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -0,0 +1,625 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirstleaf contains the pick_first load balancing policy which
+// will be the universal leaf policy after dualstack changes are implemented.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package pickfirstleaf
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst/internal"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/envconfig"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+func init() {
+	if envconfig.NewPickFirstEnabled {
+		// Register as the default pick_first balancer.
+		Name = "pick_first"
+	}
+	balancer.Register(pickfirstBuilder{})
+}
+
+var (
+	logger = grpclog.Component("pick-first-leaf-lb")
+	// Name is the name of the pick_first_leaf balancer.
+	// It is changed to "pick_first" in init() if this balancer is to be
+	// registered as the default pickfirst.
+	Name = "pick_first_leaf"
+)
+
+// TODO: change to pick-first when this becomes the default pick_first policy.
+const logPrefix = "[pick-first-leaf-lb %p] "
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+	b := &pickfirstBalancer{
+		cc:          cc,
+		addressList: addressList{},
+		subConns:    resolver.NewAddressMap(),
+		state:       connectivity.Connecting,
+		mu:          sync.Mutex{},
+	}
+	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+	return b
+}
+
+func (b pickfirstBuilder) Name() string {
+	return Name
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+	var cfg pfConfig
+	if err := json.Unmarshal(js, &cfg); err != nil {
+		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+	}
+	return cfg, nil
+}
+
+type pfConfig struct {
+	serviceconfig.LoadBalancingConfig `json:"-"`
+
+	// If set to true, instructs the LB policy to shuffle the order of the list
+	// of endpoints received from the name resolver before attempting to
+	// connect to them.
+	ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+	// The following fields are initialized at build time and read-only after
+	// that.
+	subConn balancer.SubConn
+	addr    resolver.Address
+
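+	// state and lastErr track the latest connectivity state and error reported
+	// for this SubConn.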
+	state   connectivity.State
+	lastErr error
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+	sd := &scData{
+		state: connectivity.Idle,
+		addr:  addr,
+	}
+	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+		StateListener: func(state balancer.SubConnState) {
+			b.updateSubConnState(sd, state)
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	sd.subConn = sc
+	return sd, nil
+}
+
+type pickfirstBalancer struct {
+	// The following fields are initialized at build time and read-only after
+	// that and therefore do not need to be guarded by a mutex.
+	logger *internalgrpclog.PrefixLogger
+	cc     balancer.ClientConn
+
+	// The mutex is used to synchronize updates triggered from the idle
+	// picker with the already-serialized resolver and SubConn state updates.
+	mu    sync.Mutex
+	state connectivity.State
+	// scData for active subConns mapped by address.
+	subConns    *resolver.AddressMap
+	addressList addressList
+	firstPass   bool
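+	// numTF counts TRANSIENT_FAILURE reports received after the first pass;
+	// a fresh failure picker is published once per full cycle over the SubConns.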
+	numTF       int
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determines the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received error from the name resolver: %v", err)
+	}
+
+	// The picker will not change since the balancer does not currently
+	// report an error. If the balancer hasn't received a single good resolver
+	// update yet, transition to TRANSIENT_FAILURE.
+	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+		if b.logger.V(2) {
+			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+		}
+		return
+	}
+
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+		// Cleanup state pertaining to the previous resolver state.
+		// Treat an empty address list like an error by calling b.ResolverError.
+		b.state = connectivity.TransientFailure
+		b.closeSubConnsLocked()
+		b.addressList.updateAddrs(nil)
+		b.resolverErrorLocked(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	cfg, ok := state.BalancerConfig.(pfConfig)
+	if state.BalancerConfig != nil && !ok {
+		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+	}
+
+	if b.logger.V(2) {
+		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+	}
+
+	var newAddrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling
+		// will change the order of endpoints but not touch the order of the
+		// addresses within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for
+		// each of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			// "In the flattened list, interleave addresses from the two address
+			// families, as per RFC-8305 section 4." - A61
+			// TODO: support the above language.
+			newAddrs = append(newAddrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints not set, process addresses until we migrate resolver
+		// emissions fully to Endpoints. The top channel does wrap emitted
+		// addresses with endpoints, however some balancers such as weighted
+		// target do not forward the corresponding correct endpoints down/split
+		// endpoints properly. Once all balancers correctly forward endpoints
+		// down, can delete this else conditional.
+		newAddrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			newAddrs = append([]resolver.Address{}, newAddrs...)
+			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+		}
+	}
+
+	// If an address appears in multiple endpoints or in the same endpoint
+	// multiple times, we keep it only once. We will create only one SubConn
+	// for the address because an AddressMap is used to store SubConns.
+	// Not de-duplicating would result in attempting to connect to the same
+	// SubConn multiple times in the same pass. We don't want this.
+	newAddrs = deDupAddresses(newAddrs)
+
+	// Since we have a new set of addresses, we are again at first pass.
+	b.firstPass = true
+
+	// If the previous ready SubConn exists in new address list,
+	// keep this connection and don't create new SubConns.
+	prevAddr := b.addressList.currentAddress()
+	prevAddrsCount := b.addressList.size()
+	b.addressList.updateAddrs(newAddrs)
+	if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
+		return nil
+	}
+
+	b.reconcileSubConnsLocked(newAddrs)
+	// If it's the first resolver update or the balancer was already READY
+	// (but the new address list does not contain the ready SubConn) or
+	// CONNECTING, enter CONNECTING.
+	// We may be in TRANSIENT_FAILURE due to a previous empty address list;
+	// we should still enter CONNECTING because the sticky TF behaviour
+	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+	// due to connectivity failures.
+	if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+		// Start connection attempt at first address.
+		b.state = connectivity.Connecting
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		b.requestConnectionLocked()
+	} else if b.state == connectivity.TransientFailure {
+		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+		// we're READY. See A62.
+		b.requestConnectionLocked()
+	}
+	return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.closeSubConnsLocked()
+	b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() {
+		b.firstPass = true
+		b.requestConnectionLocked()
+	}
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+	for _, sd := range b.subConns.Values() {
+		sd.(*scData).subConn.Shutdown()
+	}
+	b.subConns = resolver.NewAddressMap()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+	seenAddrs := resolver.NewAddressMap()
+	retAddrs := []resolver.Address{}
+
+	for _, addr := range addrs {
+		if _, ok := seenAddrs.Get(addr); ok {
+			continue
+		}
+		seenAddrs.Set(addr, true)
+		retAddrs = append(retAddrs, addr)
+	}
+	return retAddrs
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+//   - closing subchannels: any existing subchannels associated with addresses
+//     that are no longer in the updated list are shut down.
+//   - removing subchannels: entries for these closed subchannels are removed
+//     from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+	newAddrsMap := resolver.NewAddressMap()
+	for _, addr := range newAddrs {
+		newAddrsMap.Set(addr, true)
+	}
+
+	for _, oldAddr := range b.subConns.Keys() {
+		if _, ok := newAddrsMap.Get(oldAddr); ok {
+			continue
+		}
+		val, _ := b.subConns.Get(oldAddr)
+		val.(*scData).subConn.Shutdown()
+		b.subConns.Delete(oldAddr)
+	}
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if sd.subConn != selected.subConn {
+			sd.subConn.Shutdown()
+		}
+	}
+	b.subConns = resolver.NewAddressMap()
+	b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+	if !b.addressList.isValid() {
+		return
+	}
+	var lastErr error
+	for valid := true; valid; valid = b.addressList.increment() {
+		curAddr := b.addressList.currentAddress()
+		sd, ok := b.subConns.Get(curAddr)
+		if !ok {
+			var err error
+			// We want to assign the new scData to sd from the outer scope,
+			// hence we can't use := below.
+			sd, err = b.newSCData(curAddr)
+			if err != nil {
+				// This should never happen, unless the clientConn is being shut
+				// down.
+				if b.logger.V(2) {
+					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+				}
+				// Do nothing, the LB policy will be closed soon.
+				return
+			}
+			b.subConns.Set(curAddr, sd)
+		}
+
+		scd := sd.(*scData)
+		switch scd.state {
+		case connectivity.Idle:
+			scd.subConn.Connect()
+		case connectivity.TransientFailure:
+			// Try the next address.
+			lastErr = scd.lastErr
+			continue
+		case connectivity.Ready:
+			// Should never happen.
+			b.logger.Errorf("Requesting a connection even though we have a READY SubConn")
+		case connectivity.Shutdown:
+			// Should never happen.
+			b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map")
+		case connectivity.Connecting:
+			// Wait for the SubConn to report success or failure.
+		}
+		return
+	}
+	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+	// first pass.
+	b.endFirstPassLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	oldState := sd.state
+	sd.state = newState.ConnectivityState
+	// Previously relevant SubConns can still call back with state updates.
+	// To prevent pickers from returning these obsolete SubConns, check that
+	// the current list of active SubConns still includes this SubConn.
+	if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd {
+		return
+	}
+	if newState.ConnectivityState == connectivity.Shutdown {
+		return
+	}
+
+	if newState.ConnectivityState == connectivity.Ready {
+		b.shutdownRemainingLocked(sd)
+		if !b.addressList.seekTo(sd.addr) {
+			// This should not fail as we should have only one SubConn after
+			// entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found address list in  %v", sd.addr, b.addressList.addresses)
+			return
+		}
+		b.state = connectivity.Ready
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Ready,
+			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+		})
+		return
+	}
+
+	// If the LB policy is READY, and it receives a subchannel state change,
+	// it means that the READY subchannel has failed.
+	// A SubConn can also transition from CONNECTING directly to IDLE when
+	// a transport is successfully created, but the connection fails
+	// before the SubConn can send the notification for READY. We treat
+	// this as a successful connection and transition to IDLE.
+	if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+		// Once a transport fails, the balancer enters IDLE and starts from
+		// the first address when the picker is used.
+		b.shutdownRemainingLocked(sd)
+		b.state = connectivity.Idle
+		b.addressList.reset()
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Idle,
+			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+		})
+		return
+	}
+
+	if b.firstPass {
+		switch newState.ConnectivityState {
+		case connectivity.Connecting:
+			// The balancer can be in either IDLE, CONNECTING or
+			// TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in
+			// TRANSIENT_FAILURE until it's READY. See A62.
+			// If the balancer is already in CONNECTING, no update is needed.
+			if b.state == connectivity.Idle {
+				b.state = connectivity.Connecting
+				b.cc.UpdateState(balancer.State{
+					ConnectivityState: connectivity.Connecting,
+					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+				})
+			}
+		case connectivity.TransientFailure:
+			sd.lastErr = newState.ConnectionError
+			// Since we're re-using common SubConns while handling resolver
+			// updates, we could receive an out of turn TRANSIENT_FAILURE from
+			// a pass over the previous address list. We ignore such updates.
+
+			if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+				return
+			}
+			if b.addressList.increment() {
+				b.requestConnectionLocked()
+				return
+			}
+			// End of the first pass.
+			b.endFirstPassLocked(newState.ConnectionError)
+		}
+		return
+	}
+
+	// We have finished the first pass, keep re-connecting failing SubConns.
+	switch newState.ConnectivityState {
+	case connectivity.TransientFailure:
+		b.numTF = (b.numTF + 1) % b.subConns.Len()
+		sd.lastErr = newState.ConnectionError
+		if b.numTF%b.subConns.Len() == 0 {
+			b.cc.UpdateState(balancer.State{
+				ConnectivityState: connectivity.TransientFailure,
+				Picker:            &picker{err: newState.ConnectionError},
+			})
+		}
+		// We don't need to request re-resolution since the SubConn already
+		// does that before reporting TRANSIENT_FAILURE.
+		// TODO: #7534 - Move re-resolution requests from SubConn into
+		// pick_first.
+	case connectivity.Idle:
+		sd.subConn.Connect()
+	}
+}
+
+func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) {
+	b.firstPass = false
+	b.numTF = 0
+	b.state = connectivity.TransientFailure
+
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: lastErr},
+	})
+	// Start re-connecting all the SubConns that are already in IDLE.
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if sd.state == connectivity.Idle {
+			sd.subConn.Connect()
+		}
+	}
+}
+
+type picker struct {
+	result balancer.PickResult
+	err    error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+	exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.exitIdle()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a one-dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+	addresses []resolver.Address
+	idx       int
+}
+
+func (al *addressList) isValid() bool {
+	return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+	return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+	if !al.isValid() {
+		return false
+	}
+	al.idx++
+	return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+	if !al.isValid() {
+		return resolver.Address{}
+	}
+	return al.addresses[al.idx]
+}
+
+// first returns the first address in the list. If the list is empty, it returns
+// an empty address instead.
+func (al *addressList) first() resolver.Address {
+	if len(al.addresses) == 0 {
+		return resolver.Address{}
+	}
+	return al.addresses[0]
+}
+
+func (al *addressList) reset() {
+	al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+	al.addresses = addrs
+	al.reset()
+}
+
+// seekTo moves the index to the first address equal to the needle and returns
+// true. If the needle is not found, it returns false and leaves the current
+// index unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+	for ai, addr := range al.addresses {
+		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+			continue
+		}
+		al.idx = ai
+		return true
+	}
+	return false
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
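
For illustration only (not part of the vendored code), a minimal standalone sketch of the flattened-address iteration the balancer above performs during its first pass; plain strings stand in for resolver.Address and all names are hypothetical.

package main

import "fmt"

// addrList mirrors the first-pass iteration of the addressList type above,
// using plain strings instead of resolver.Address to stay self-contained.
type addrList struct {
	addrs []string
	idx   int
}

func (al *addrList) isValid() bool { return al.idx < len(al.addrs) }

// increment moves to the next index and reports whether it is still in range.
func (al *addrList) increment() bool {
	if !al.isValid() {
		return false
	}
	al.idx++
	return al.idx < len(al.addrs)
}

func (al *addrList) current() string {
	if !al.isValid() {
		return ""
	}
	return al.addrs[al.idx]
}

func main() {
	al := &addrList{addrs: []string{"10.0.0.1:443", "10.0.0.2:443", "10.0.0.3:443"}}
	for {
		fmt.Println("attempting", al.current()) // one connection attempt per address
		if !al.increment() {
			break // end of the first pass; the real balancer reports TRANSIENT_FAILURE here
		}
	}
}
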
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 8ad6ce2f..2a4f2878 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -24,12 +24,14 @@ import (
 	"sync"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancer/gracefulswitch"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
 var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
@@ -256,8 +258,8 @@ type acBalancerWrapper struct {
 	ccb           *ccBalancerWrapper // read-only
 	stateListener func(balancer.SubConnState)
 
-	mu        sync.Mutex
-	producers map[balancer.ProducerBuilder]*refCountedProducer
+	producersMu sync.Mutex
+	producers   map[balancer.ProducerBuilder]*refCountedProducer
 }
 
 // updateState is invoked by grpc to push a subConn state update to the
@@ -267,6 +269,9 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve
 		if ctx.Err() != nil || acbw.ccb.balancer == nil {
 			return
 		}
+		// Invalidate all producers on any state change.
+		acbw.closeProducers()
+
 		// Even though it is optional for balancers, gracefulswitch ensures
 		// opts.StateListener is set, so this cannot ever be nil.
 		// TODO: delete this comment when UpdateSubConnState is removed.
@@ -275,16 +280,6 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve
 			setConnectedAddress(&scs, curAddr)
 		}
 		acbw.stateListener(scs)
-		acbw.ac.mu.Lock()
-		defer acbw.ac.mu.Unlock()
-		if s == connectivity.Ready {
-			// When changing states to READY, reset stateReadyChan.  Wait until
-			// after we notify the LB policy's listener(s) in order to prevent
-			// ac.getTransport() from unblocking before the LB policy starts
-			// tracking the subchannel as READY.
-			close(acbw.ac.stateReadyChan)
-			acbw.ac.stateReadyChan = make(chan struct{})
-		}
 	})
 }
 
@@ -301,6 +296,7 @@ func (acbw *acBalancerWrapper) Connect() {
 }
 
 func (acbw *acBalancerWrapper) Shutdown() {
+	acbw.closeProducers()
 	acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
 }
 
@@ -308,9 +304,10 @@ func (acbw *acBalancerWrapper) Shutdown() {
 // ready, blocks until it is or ctx expires.  Returns an error when the context
 // expires or the addrConn is shut down.
 func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
-	transport, err := acbw.ac.getTransport(ctx)
-	if err != nil {
-		return nil, err
+	transport := acbw.ac.getReadyTransport()
+	if transport == nil {
+		return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
+
 	}
 	return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
 }
@@ -335,8 +332,8 @@ type refCountedProducer struct {
 }
 
 func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
-	acbw.mu.Lock()
-	defer acbw.mu.Unlock()
+	acbw.producersMu.Lock()
+	defer acbw.producersMu.Unlock()
 
 	// Look up existing producer from this builder.
 	pData := acbw.producers[pb]
@@ -353,13 +350,26 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
 	// and delete the refCountedProducer from the map if the total reference
 	// count goes to zero.
 	unref := func() {
-		acbw.mu.Lock()
+		acbw.producersMu.Lock()
+		// If closeProducers has already closed this producer instance, refs is
+		// set to 0, so the check after decrementing will never pass, and the
+		// producer will not be double-closed.
 		pData.refs--
 		if pData.refs == 0 {
 			defer pData.close() // Run outside the acbw mutex
 			delete(acbw.producers, pb)
 		}
-		acbw.mu.Unlock()
+		acbw.producersMu.Unlock()
 	}
 	return pData.producer, grpcsync.OnceFunc(unref)
 }
+
+func (acbw *acBalancerWrapper) closeProducers() {
+	acbw.producersMu.Lock()
+	defer acbw.producersMu.Unlock()
+	for pb, pData := range acbw.producers {
+		pData.refs = 0
+		pData.close()
+		delete(acbw.producers, pb)
+	}
+}
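
For illustration, a small self-contained sketch of the ref-counting contract the comments above describe: closing everything forces the count to zero first, so a later unref can never close a producer a second time. All names here are hypothetical.

package main

import (
	"fmt"
	"sync"
)

type refCounted struct {
	mu    sync.Mutex
	refs  int
	close func()
}

// ref hands out a once-only unref, mirroring the cleanup func returned by
// GetOrBuildProducer above.
func (r *refCounted) ref() func() {
	r.mu.Lock()
	r.refs++
	r.mu.Unlock()
	return sync.OnceFunc(func() {
		r.mu.Lock()
		defer r.mu.Unlock()
		r.refs--
		if r.refs == 0 {
			r.close()
		}
	})
}

// closeAll mirrors closeProducers: refs is forced to 0 before closing, so the
// decrement inside a pending unref can never reach 0 again.
func (r *refCounted) closeAll() {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.refs = 0
	r.close()
}

func main() {
	r := &refCounted{close: func() { fmt.Println("closed") }}
	unref := r.ref()
	r.closeAll() // prints "closed" exactly once
	unref()      // no second close: refs goes from 0 to -1, never back to 0
}
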
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 9c8850e3..19763f8e 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -825,14 +825,13 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
 	}
 
 	ac := &addrConn{
-		state:          connectivity.Idle,
-		cc:             cc,
-		addrs:          copyAddresses(addrs),
-		scopts:         opts,
-		dopts:          cc.dopts,
-		channelz:       channelz.RegisterSubChannel(cc.channelz, ""),
-		resetBackoff:   make(chan struct{}),
-		stateReadyChan: make(chan struct{}),
+		state:        connectivity.Idle,
+		cc:           cc,
+		addrs:        copyAddresses(addrs),
+		scopts:       opts,
+		dopts:        cc.dopts,
+		channelz:     channelz.RegisterSubChannel(cc.channelz, ""),
+		resetBackoff: make(chan struct{}),
 	}
 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
 	// Start with our address set to the first address; this may be updated if
@@ -1141,10 +1140,15 @@ func (cc *ClientConn) Close() error {
 
 	<-cc.resolverWrapper.serializer.Done()
 	<-cc.balancerWrapper.serializer.Done()
-
+	var wg sync.WaitGroup
 	for ac := range conns {
-		ac.tearDown(ErrClientConnClosing)
+		wg.Add(1)
+		go func(ac *addrConn) {
+			defer wg.Done()
+			ac.tearDown(ErrClientConnClosing)
+		}(ac)
 	}
+	wg.Wait()
 	cc.addTraceEvent("deleted")
 	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
 	// trace reference to the entity being deleted, and thus prevent it from being
@@ -1179,8 +1183,7 @@ type addrConn struct {
 	addrs   []resolver.Address // All addresses that the resolver resolved to.
 
 	// Use updateConnectivityState for updating addrConn's connectivity state.
-	state          connectivity.State
-	stateReadyChan chan struct{} // closed and recreated on every READY state change.
+	state connectivity.State
 
 	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
 	resetBackoff chan struct{}
@@ -1251,6 +1254,8 @@ func (ac *addrConn) resetTransportAndUnlock() {
 	ac.mu.Unlock()
 
 	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+		// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
+		// to ensure one resolution request per pass instead of per subconn failure.
 		ac.cc.resolveNow(resolver.ResolveNowOptions{})
 		ac.mu.Lock()
 		if acCtx.Err() != nil {
@@ -1292,7 +1297,7 @@ func (ac *addrConn) resetTransportAndUnlock() {
 	ac.mu.Unlock()
 }
 
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at
+// tryAllAddrs tries to create a connection to the addresses, and stops at
 // the first successful one. It returns an error if no address was successfully
 // connected, or updates ac appropriately with the new transport.
 func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
@@ -1504,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
 	return nil
 }
 
-// getTransport waits until the addrconn is ready and returns the transport.
-// If the context expires first, returns an appropriate status.  If the
-// addrConn is stopped first, returns an Unavailable status error.
-func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
-	for ctx.Err() == nil {
-		ac.mu.Lock()
-		t, state, sc := ac.transport, ac.state, ac.stateReadyChan
-		ac.mu.Unlock()
-		if state == connectivity.Ready {
-			return t, nil
-		}
-		if state == connectivity.Shutdown {
-			return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
-		}
-
-		select {
-		case <-ctx.Done():
-		case <-sc:
-		}
-	}
-	return nil, status.FromContextError(ctx.Err()).Err()
-}
-
 // tearDown starts to tear down the addrConn.
 //
 // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
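
The ClientConn.Close change above switches from sequential to concurrent teardown of the remaining connections. A minimal sketch of that fan-out-and-wait shape, with a sleep standing in for a slow tearDown:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	conns := []string{"conn-1", "conn-2", "conn-3"}
	var wg sync.WaitGroup
	for _, c := range conns {
		wg.Add(1)
		go func(c string) {
			defer wg.Done()
			time.Sleep(10 * time.Millisecond) // stand-in for ac.tearDown(...)
			fmt.Println("tore down", c)
		}(c)
	}
	wg.Wait() // Close only proceeds once every teardown has finished
	fmt.Println("all connections closed")
}
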
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index 41143585..e163a473 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{
 
 // NewTLS uses c to construct a TransportCredentials based on TLS.
 func NewTLS(c *tls.Config) TransportCredentials {
-	tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
-	tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
+	config := applyDefaults(c)
+	if config.GetConfigForClient != nil {
+		oldFn := config.GetConfigForClient
+		config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
+			cfgForClient, err := oldFn(hello)
+			if err != nil || cfgForClient == nil {
+				return cfgForClient, err
+			}
+			return applyDefaults(cfgForClient), nil
+		}
+	}
+	return &tlsCreds{config: config}
+}
+
+func applyDefaults(c *tls.Config) *tls.Config {
+	config := credinternal.CloneTLSConfig(c)
+	config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
 	// If the user did not configure a MinVersion and did not configure a
 	// MaxVersion < 1.2, use MinVersion=1.2, which is required by
 	// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
-	if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
-		tc.config.MinVersion = tls.VersionTLS12
+	if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
+		config.MinVersion = tls.VersionTLS12
 	}
 	// If the user did not configure CipherSuites, use all "secure" cipher
 	// suites reported by the TLS package, but remove some explicitly forbidden
 	// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
-	if tc.config.CipherSuites == nil {
+	if config.CipherSuites == nil {
 		for _, cs := range tls.CipherSuites() {
 			if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
-				tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
+				config.CipherSuites = append(config.CipherSuites, cs.ID)
 			}
 		}
 	}
-	return tc
+	return config
 }
 
 // NewClientTLSFromCert constructs TLS credentials from the provided root
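
A hypothetical usage sketch (not from the patch): a server whose base config defers to GetConfigForClient per handshake. With the change above, the config returned by that callback also receives the gRPC defaults (h2 appended to NextProtos, MinVersion of at least TLS 1.2, pruned cipher suites), not just the config originally passed to NewTLS.

package tlsdefaults

import (
	"crypto/tls"

	"google.golang.org/grpc/credentials"
)

// PerSNICreds picks a certificate per SNI name; certForHost is a placeholder
// lookup supplied by the caller.
func PerSNICreds(certForHost func(host string) (*tls.Certificate, error)) credentials.TransportCredentials {
	return credentials.NewTLS(&tls.Config{
		GetConfigForClient: func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
			cert, err := certForHost(hello.ServerName)
			if err != nil {
				return nil, err
			}
			// This per-client config now also gets the defaults applied by the
			// hunk above.
			return &tls.Config{Certificates: []tls.Certificate{*cert}}, nil
		},
	})
}
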
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 2b285bee..518692c3 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption {
 // option to true from the Control field. For a concrete example of how to do
 // this, see internal.NetDialerWithTCPKeepalive().
 //
-// For more information, please see [issue 23459] in the Go github repo.
+// For more information, please see [issue 23459] in the Go GitHub repo.
 //
 // [issue 23459]: https://github.com/golang/go/issues/23459
 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
index 13821a92..85540f86 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
@@ -33,6 +33,8 @@ type lbConfig struct {
 	childConfig  serviceconfig.LoadBalancingConfig
 }
 
+// ChildName returns the name of the child balancer of the gracefulswitch
+// Balancer.
 func ChildName(l serviceconfig.LoadBalancingConfig) string {
 	return l.(*lbConfig).childBuilder.Name()
 }
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go
index d7e9e1d5..3ec66279 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/channel.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go
@@ -43,6 +43,8 @@ type Channel struct {
 	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
 	traceRefCount int32
 
+	// ChannelMetrics holds connectivity state, target and call metrics for the
+	// channel within channelz.
 	ChannelMetrics ChannelMetrics
 }
 
@@ -50,6 +52,8 @@ type Channel struct {
 // nesting.
 func (c *Channel) channelzIdentifier() {}
 
+// String returns a string representation of the Channel, including its parent
+// entity and ID.
 func (c *Channel) String() string {
 	if c.Parent == nil {
 		return fmt.Sprintf("Channel #%d", c.ID)
@@ -61,24 +65,31 @@ func (c *Channel) id() int64 {
 	return c.ID
 }
 
+// SubChans returns a copy of the map of sub-channels associated with the
+// Channel.
 func (c *Channel) SubChans() map[int64]string {
 	db.mu.RLock()
 	defer db.mu.RUnlock()
 	return copyMap(c.subChans)
 }
 
+// NestedChans returns a copy of the map of nested channels associated with the
+// Channel.
 func (c *Channel) NestedChans() map[int64]string {
 	db.mu.RLock()
 	defer db.mu.RUnlock()
 	return copyMap(c.nestedChans)
 }
 
+// Trace returns a copy of the Channel's trace data.
 func (c *Channel) Trace() *ChannelTrace {
 	db.mu.RLock()
 	defer db.mu.RUnlock()
 	return c.trace.copy()
 }
 
+// ChannelMetrics holds connectivity state, target and call metrics for the
+// channel within channelz.
 type ChannelMetrics struct {
 	// The current connectivity state of the channel.
 	State atomic.Pointer[connectivity.State]
@@ -136,12 +147,16 @@ func strFromPointer(s *string) string {
 	return *s
 }
 
+// String returns a string representation of the ChannelMetrics, including its
+// state, target, and call metrics.
 func (c *ChannelMetrics) String() string {
 	return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
 		c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
 	)
 }
 
+// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
+// specified initial values for testing purposes.
 func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
 	c := &ChannelMetrics{}
 	c.State.Store(&state)
diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go
index cdfc49d6..b5a82499 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/server.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/server.go
@@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se
 	return sm
 }
 
+// CopyFrom copies the metrics data from the provided ServerMetrics
+// instance into the current instance.
 func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
 	sm.CallsStarted.Store(o.CallsStarted.Load())
 	sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go
index fa64834b..90103847 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/socket.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go
@@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct {
 	RemoteFlowControlWindow int64
 }
 
+// SocketType represents the type of socket.
 type SocketType string
 
+// Valid values for SocketType.
 const (
 	SocketTypeNormal = "NormalSocket"
 	SocketTypeListen = "ListenSocket"
 )
 
+// Socket represents a socket within channelz. It holds socket metrics and
+// data related to socket activity, and provides methods for managing and
+// interacting with sockets.
 type Socket struct {
 	Entity
 	SocketType       SocketType
@@ -100,6 +105,8 @@ type Socket struct {
 	Security credentials.ChannelzSecurityValue
 }
 
+// String returns a string representation of the Socket, including its parent
+// entity, socket type, and ID.
 func (ls *Socket) String() string {
 	return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
 }
diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
index 3b88e4cb..b20802e6 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
@@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 {
 	return sc.ID
 }
 
+// Sockets returns a copy of the sockets map associated with the SubChannel.
 func (sc *SubChannel) Sockets() map[int64]string {
 	db.mu.RLock()
 	defer db.mu.RUnlock()
 	return copyMap(sc.sockets)
 }
 
+// Trace returns a copy of the ChannelTrace associated with the SubChannel.
 func (sc *SubChannel) Trace() *ChannelTrace {
 	db.mu.RLock()
 	defer db.mu.RUnlock()
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
index 36b86740..2bffe477 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -79,13 +79,21 @@ type TraceEvent struct {
 	Parent   *TraceEvent
 }
 
+// ChannelTrace provides tracing information for a channel.
+// It tracks various events and metadata related to the channel's lifecycle
+// and operations.
 type ChannelTrace struct {
-	cm           *channelMap
-	clearCalled  bool
+	cm          *channelMap
+	clearCalled bool
+	// The time when the trace was created.
 	CreationTime time.Time
-	EventNum     int64
-	mu           sync.Mutex
-	Events       []*traceEvent
+	// A counter for the number of events recorded in the
+	// trace.
+	EventNum int64
+	mu       sync.Mutex
+	// A slice of traceEvent pointers representing the events recorded for
+	// this channel.
+	Events []*traceEvent
 }
 
 func (c *ChannelTrace) copy() *ChannelTrace {
@@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{
 	RefNormalSocket: "NormalSocket",
 }
 
+// String returns a string representation of the RefChannelType.
 func (r RefChannelType) String() string {
 	return refChannelTypeToString[r]
 }
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 452985f8..6e7dd6b7 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -50,6 +50,11 @@ var (
 	// xDS fallback is turned on. If this is unset or is false, only the first
 	// xDS server in the list of server configs will be used.
 	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+	// instead of the existing pickfirst implementation. This can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+	// to "true".
+	NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
 )
 
 func boolFromEnv(envVar string, def bool) bool {
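
The variable is read once when the grpc package initializes, so it must already be present in the process environment at startup. One illustrative way to guarantee that from a launcher (the binary name is hypothetical):

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("./my-grpc-client") // hypothetical binary that imports grpc
	cmd.Env = append(os.Environ(), "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST=true")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}
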
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index 19b9d639..8e8e8612 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
 	return cs
 }
 
-// TrySchedule tries to schedules the provided callback function f to be
+// TrySchedule tries to schedule the provided callback function f to be
 // executed in the order it was added. This is a best-effort operation. If the
 // context passed to NewCallbackSerializer was canceled before this method is
 // called, the callback will not be scheduled.
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
index ec62b477..683d1955 100644
--- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) {
 }
 
 // baseContentType is the base content-type for gRPC.  This is a valid
-// content-type on it's own, but can also include a content-subtype such as
+// content-type on its own, but can also include a content-subtype such as
 // "proto" as a suffix after "+" or ";".  See
 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
 // for more details.
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index fe49cb74..2c13ee9d 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool {
 	return true
 }
 
+// EnterIdleModeForTesting instructs the channel to enter idle mode.
 func (m *Manager) EnterIdleModeForTesting() {
 	m.tryEnterIdleMode()
 }
@@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error {
 		//   came in and OnCallBegin() noticed that the calls count is negative.
 		// - Channel is in idle mode, and multiple new RPCs come in at the same
 		//   time, all of them notice a negative calls count in OnCallBegin and get
-		//   here. The first one to get the lock would got the channel to exit idle.
+		//   here. The first one to get the lock would get the channel to exit idle.
 		// - Channel is not in idle mode, and the user calls Connect which calls
 		//   m.ExitIdleMode.
 		//
@@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool {
 	return atomic.LoadInt32(&m.closed) == 1
 }
 
+// Close stops the timer associated with the Manager, if it exists.
 func (m *Manager) Close() {
 	atomic.StoreInt32(&m.closed, 1)
 
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 7aae9240..20b4dc3d 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -191,6 +191,8 @@ var (
 	// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
 	ExitIdleModeForTesting any // func(*grpc.ClientConn) error
 
+	// ChannelzTurnOffForTesting disables the Channelz service for testing
+	// purposes.
 	ChannelzTurnOffForTesting func()
 
 	// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
@@ -205,10 +207,6 @@ var (
 	// default resolver scheme.
 	UserSetDefaultScheme = false
 
-	// ShuffleAddressListForTesting pseudo-randomizes the order of addresses.  n
-	// is the number of elements.  swap swaps the elements with indexes i and j.
-	ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
-
 	// ConnectedAddress returns the connected address for a SubConnState. The
 	// address is only valid if the state is READY.
 	ConnectedAddress any // func (scs SubConnState) resolver.Address
@@ -235,7 +233,7 @@ var (
 //
 // The implementation is expected to create a health checking RPC stream by
 // calling newStream(), watch for the health status of serviceName, and report
-// it's health back by calling setConnectivityState().
+// its health back by calling setConnectivityState().
 //
 // The health checking protocol is defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 4552db16..8691698e 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -177,7 +177,7 @@ type dnsResolver struct {
 	// finished. Otherwise, data race will be possible. [Race Example] in
 	// dns_resolver_test we replace the real lookup functions with mocked ones to
 	// facilitate testing. If Close() doesn't wait for watcher() goroutine
-	// finishes, race detector sometimes will warns lookup (READ the lookup
+	// finishes, race detector sometimes will warn lookup (READ the lookup
 	// function pointers) inside watcher() goroutine has data race with
 	// replaceNetFunc (WRITE the lookup function pointers).
 	wg                   sync.WaitGroup
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
index be110d41..79044657 100644
--- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -54,6 +54,8 @@ func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
 	}
 }
 
+// RecordInt64Count records the measurement alongside labels on the int
+// count associated with the provided handle.
 func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
@@ -62,6 +64,8 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle,
 	}
 }
 
+// RecordFloat64Count records the measurement alongside labels on the float
+// count associated with the provided handle.
 func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
@@ -70,6 +74,8 @@ func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHand
 	}
 }
 
+// RecordInt64Histo records the measurement alongside labels on the int
+// histo associated with the provided handle.
 func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
@@ -78,6 +84,8 @@ func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle,
 	}
 }
 
+// RecordFloat64Histo records the measurement alongside labels on the float
+// histo associated with the provided handle.
 func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
@@ -86,6 +94,8 @@ func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHand
 	}
 }
 
+// RecordInt64Gauge records the measurement alongside labels on the int
+// gauge associated with the provided handle.
 func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 75792538..1186f1e9 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -149,6 +149,8 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
 
 // Details returns a slice of details messages attached to the status.
 // If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
 func (s *Status) Details() []any {
 	if s == nil || s.s == nil {
 		return nil
@@ -160,7 +162,38 @@ func (s *Status) Details() []any {
 			details = append(details, err)
 			continue
 		}
-		details = append(details, detail)
+		// The call to MessageV1Of is required to unwrap the proto message if
+		// it implemented only the MessageV1 API. The proto message would have
+		// been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+		// added to a global registry used by any.UnmarshalNew().
+		// MessageV1Of has the following behaviour:
+		// 1. If the given message is a wrapped MessageV1, it returns the
+		//   unwrapped value.
+		// 2. If the given message already implements MessageV1, it returns it
+		//   as is.
+		// 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+		//
+		// Since the Status.WithDetails() API only accepts MessageV1, calling
+		// MessageV1Of ensures we return the same type that was given to
+		// WithDetails:
+		// * If the given type implemented only MessageV1, the unwrapping from
+		//   point 1 above will restore the type.
+		// * If the given type implemented both MessageV1 and MessageV2, point 2
+		//   above will ensure no wrapping is performed.
+		// * If the given type implemented only MessageV2 and was wrapped using
+		//   MessageV1Of before passing to WithDetails(), it would be unwrapped
+		//   in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+		//   that the type is wrapped in a MessageV1 wrapper again before
+		//   returning. Note that protoc-gen-go doesn't generate code which
+		//   implements ONLY MessageV2 at the time of writing.
+		//
+		// NOTE: Status details can also be added using the FromProto method.
+		// This could theoretically allow passing a Detail message that only
+		// implements the V2 API. In such a case the message will be wrapped in
+		// a MessageV1 wrapper when fetched using Details().
+		// Since protoc-gen-go generates only code that implements both V1 and
+		// V2 APIs for backward compatibility, this is not a concern.
+		details = append(details, protoadapt.MessageV1Of(detail))
 	}
 	return details
 }
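
A usage sketch of the round trip the comment above describes: the value returned from Details() can be type-asserted back to the concrete type handed to WithDetails(). The errdetails message used here is only an example.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.FailedPrecondition, "quota exceeded")
	st, err := st.WithDetails(&errdetails.QuotaFailure{
		Violations: []*errdetails.QuotaFailure_Violation{
			{Subject: "project:example", Description: "daily limit reached"},
		},
	})
	if err != nil {
		panic(err)
	}
	for _, d := range st.Details() {
		// Same concrete type comes back, whether the message implements the
		// V1 or V2 proto API.
		if qf, ok := d.(*errdetails.QuotaFailure); ok {
			fmt.Println("violations:", len(qf.GetViolations()))
		}
	}
}
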
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index c769deab..62b81885 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -86,9 +86,9 @@ type http2Client struct {
 	writerDone chan struct{} // sync point to enable testing.
 	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
 	// that the server sent GoAway on this transport.
-	goAway chan struct{}
-
-	framer *framer
+	goAway        chan struct{}
+	keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
+	framer        *framer
 	// controlBuf delivers all the control related tasks (e.g., window
 	// updates, reset streams, and various settings) to the controller.
 	// Do not access controlBuf with mu held.
@@ -335,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		readerDone:            make(chan struct{}),
 		writerDone:            make(chan struct{}),
 		goAway:                make(chan struct{}),
+		keepaliveDone:         make(chan struct{}),
 		framer:                newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
 		fc:                    &trInFlow{limit: uint32(icwz)},
 		scheme:                scheme,
@@ -527,8 +528,9 @@ func (t *http2Client) getPeer() *peer.Peer {
 // to be the last frame loopy writes to the transport.
 func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
 	t.mu.Lock()
-	defer t.mu.Unlock()
-	if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil {
+	maxStreamID := t.nextID - 2
+	t.mu.Unlock()
+	if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
 		return false, err
 	}
 	return false, g.closeConn
@@ -1008,6 +1010,9 @@ func (t *http2Client) Close(err error) {
 		// should unblock it so that the goroutine eventually exits.
 		t.kpDormancyCond.Signal()
 	}
+	// Append info about previous goaways if there were any, since this may be important
+	// for understanding the root cause for this connection to be closed.
+	goAwayDebugMessage := t.goAwayDebugMessage
 	t.mu.Unlock()
 
 	// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
@@ -1025,11 +1030,13 @@ func (t *http2Client) Close(err error) {
 	}
 	t.cancel()
 	t.conn.Close()
+	// Wait for the reader and keepalive goroutines to exit before returning,
+	// to ensure all resources are cleaned up before Close returns.
+	<-t.readerDone
+	if t.keepaliveEnabled {
+		<-t.keepaliveDone
+	}
 	channelz.RemoveEntry(t.channelz.ID)
-	// Append info about previous goaways if there were any, since this may be important
-	// for understanding the root cause for this connection to be closed.
-	_, goAwayDebugMessage := t.GetGoAwayReason()
-
 	var st *status.Status
 	if len(goAwayDebugMessage) > 0 {
 		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
@@ -1316,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
 	t.controlBuf.put(pingAck)
 }
 
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
 	t.mu.Lock()
 	if t.state == closing {
 		t.mu.Unlock()
-		return
+		return nil
 	}
 	if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
 		// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
@@ -1332,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 	id := f.LastStreamID
 	if id > 0 && id%2 == 0 {
 		t.mu.Unlock()
-		t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
-		return
+		return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
 	}
 	// A client can receive multiple GoAways from the server (see
 	// https://github.com/grpc/grpc-go/issues/1387).  The idea is that the first
@@ -1350,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
 		if id > t.prevGoAwayID {
 			t.mu.Unlock()
-			t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
-			return
+			return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
 		}
 	default:
 		t.setGoAwayReason(f)
@@ -1375,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 	t.prevGoAwayID = id
 	if len(t.activeStreams) == 0 {
 		t.mu.Unlock()
-		t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
-		return
+		return connectionErrorf(true, nil, "received goaway and there are no active streams")
 	}
 
 	streamsToClose := make([]*Stream, 0)
@@ -1393,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 	for _, stream := range streamsToClose {
 		t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
 	}
+	return nil
 }
 
 // setGoAwayReason sets the value of t.goAwayReason based
@@ -1628,7 +1633,13 @@ func (t *http2Client) readServerPreface() error {
 // network connection.  If the server preface is not read successfully, an
 // error is pushed to errCh; otherwise errCh is closed with no error.
 func (t *http2Client) reader(errCh chan<- error) {
-	defer close(t.readerDone)
+	var errClose error
+	defer func() {
+		close(t.readerDone)
+		if errClose != nil {
+			t.Close(errClose)
+		}
+	}()
 
 	if err := t.readServerPreface(); err != nil {
 		errCh <- err
@@ -1669,7 +1680,7 @@ func (t *http2Client) reader(errCh chan<- error) {
 				continue
 			}
 			// Transport error.
-			t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
+			errClose = connectionErrorf(true, err, "error reading from server: %v", err)
 			return
 		}
 		switch frame := frame.(type) {
@@ -1684,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) {
 		case *http2.PingFrame:
 			t.handlePing(frame)
 		case *http2.GoAwayFrame:
-			t.handleGoAway(frame)
+			errClose = t.handleGoAway(frame)
 		case *http2.WindowUpdateFrame:
 			t.handleWindowUpdate(frame)
 		default:
@@ -1697,6 +1708,13 @@ func (t *http2Client) reader(errCh chan<- error) {
 
 // keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
 func (t *http2Client) keepalive() {
+	var err error
+	defer func() {
+		close(t.keepaliveDone)
+		if err != nil {
+			t.Close(err)
+		}
+	}()
 	p := &ping{data: [8]byte{}}
 	// True iff a ping has been sent, and no data has been received since then.
 	outstandingPing := false
@@ -1720,7 +1738,7 @@ func (t *http2Client) keepalive() {
 				continue
 			}
 			if outstandingPing && timeoutLeft <= 0 {
-				t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
+				err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
 				return
 			}
 			t.mu.Lock()
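
A self-contained sketch of the shutdown ordering the reader/keepalive changes above rely on: the worker closes its done channel before calling Close, so Close can block on that channel without deadlocking. All names here are illustrative.

package main

import (
	"errors"
	"fmt"
	"sync"
)

type conn struct {
	done      chan struct{}
	closeOnce sync.Once
}

func (c *conn) Close(err error) {
	c.closeOnce.Do(func() { fmt.Println("closing:", err) })
	<-c.done // wait for the worker goroutine to exit before returning
}

func (c *conn) worker() {
	var errClose error
	defer func() {
		close(c.done) // closed first, so the Close call below cannot deadlock
		if errClose != nil {
			c.Close(errClose)
		}
	}()
	errClose = errors.New("fatal read error") // stand-in for a transport error
}

func main() {
	c := &conn{done: make(chan struct{})}
	go c.worker()
	c.Close(errors.New("shutdown")) // blocks until the worker has closed c.done
}
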
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 924ba4f3..e12cb0bc 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -547,6 +547,15 @@ func (s *Stream) write(m recvMsg) {
 	s.buf.put(m)
 }
 
+// ReadHeader reads data into the provided header slice from the stream. It
+// first checks if there was an error during a previous read operation and
+// returns it if present. It then requests a read operation for the length of
+// the header. It continues to read from the stream until the entire header
+// slice is filled or an error occurs. If an `io.EOF` error is encountered
+// with partially read data, it is converted to `io.ErrUnexpectedEOF` to
+// indicate an unexpected end of the stream. The method returns any error
+// encountered during the read process or nil if the header was successfully
+// read.
 func (s *Stream) ReadHeader(header []byte) (err error) {
 	// Don't request a read if there was an error earlier
 	if er := s.trReader.er; er != nil {
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
index 4d66b2cc..ecbf0b9a 100644
--- a/vendor/google.golang.org/grpc/mem/buffers.go
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -65,6 +65,9 @@ var (
 	refObjectPool    = sync.Pool{New: func() any { return new(atomic.Int32) }}
 )
 
+// IsBelowBufferPoolingThreshold returns true if the given size is less than or
+// equal to the threshold for buffer pooling. This is used to determine whether
+// to pool buffers or allocate them directly.
 func IsBelowBufferPoolingThreshold(size int) bool {
 	return size <= bufferPoolingThreshold
 }
@@ -89,7 +92,11 @@ func newBuffer() *buffer {
 //
 // Note that the backing array of the given data is not copied.
 func NewBuffer(data *[]byte, pool BufferPool) Buffer {
-	if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) {
+	// Use the buffer's capacity instead of the length, otherwise buffers may
+	// not be reused under certain conditions. For example, if a large buffer
+	// is acquired from the pool, but fewer bytes than the buffering threshold
+	// are written to it, the buffer will not be returned to the pool.
+	if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
 		return (SliceBuffer)(*data)
 	}
 	b := newBuffer()
@@ -194,19 +201,19 @@ func (b *buffer) read(buf []byte) (int, Buffer) {
 	return n, b
 }
 
-// String returns a string representation of the buffer. May be used for
-// debugging purposes.
 func (b *buffer) String() string {
 	return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
 }
 
+// ReadUnsafe reads bytes from the given Buffer into the provided slice.
+// It does not perform safety checks.
 func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
 	return buf.read(dst)
 }
 
 // SplitUnsafe modifies the receiver to point to the first n bytes while it
-// returns a new reference to the remaining bytes. The returned Buffer functions
-// just like a normal reference acquired using Ref().
+// returns a new reference to the remaining bytes. The returned Buffer
+// functions just like a normal reference acquired using Ref().
 func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
 	return buf.split(n)
 }
@@ -232,12 +239,21 @@ func (e emptyBuffer) read([]byte) (int, Buffer) {
 	return 0, e
 }
 
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
 type SliceBuffer []byte
 
+// ReadOnlyData returns the byte slice.
 func (s SliceBuffer) ReadOnlyData() []byte { return s }
-func (s SliceBuffer) Ref()                 {}
-func (s SliceBuffer) Free()                {}
-func (s SliceBuffer) Len() int             { return len(s) }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len returns the length of the underlying byte slice.
+func (s SliceBuffer) Len() int { return len(s) }
 
 func (s SliceBuffer) split(n int) (left, right Buffer) {
 	return s[:n], s[n:]
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 2d96f140..aba1ae3e 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -791,9 +791,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
 		if !haveCompressor {
 			if isServer {
 				return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
-			} else {
-				return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
 			}
+			return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
 		}
 	default:
 		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index a96b6a6b..d50e8435 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.67.1"
+const Version = "1.68.0"
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 4b177c82..e9fe1039 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa
 		switch tok.Kind() {
 		case json.ObjectClose:
 			if !found {
-				return d.newError(tok.Pos(), `missing "value" field`)
+				// We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
+				// for compatibility with other proto runtimes that have interpreted the spec differently.
+				if m.Descriptor().FullName() != genid.Empty_message_fullname {
+					return d.newError(tok.Pos(), `missing "value" field`)
+				}
 			}
 			return nil
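
A usage sketch of the relaxed parsing above, assuming a protobuf-go runtime that includes this change: an Any wrapping google.protobuf.Empty is accepted without an explicit "value" member.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	in := []byte(`{"@type": "type.googleapis.com/google.protobuf.Empty"}`)
	var a anypb.Any
	if err := protojson.Unmarshal(in, &a); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println("accepted, type URL:", a.GetTypeUrl())
}
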
 
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index ff6a38360add36f53d48bb0863b701696e0d7b2d..2c0693d7abbf532f021dafc96e7568f57214b8e7 100644
GIT binary patch
literal 99
zcmd;*m3YRk#C*w)K}(o}QGiK;Nr72|(SYfa9SaAe1S6NM#B;bblK@aefe9$h2$E(1
dOTS=O5(H{Ql40Ut&|<x0#{^XSj9ud;I{@m>548XQ

literal 93
zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j
Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H

diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index 08dad769..bf1aba0e 100644
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -10,4 +10,9 @@ import "google.golang.org/protobuf/types/descriptorpb"
 const (
 	Minimum = descriptorpb.Edition_EDITION_PROTO2
 	Maximum = descriptorpb.Edition_EDITION_2023
+
+	// MaximumKnown is the maximum edition that is known to Go Protobuf, but not
+	// declared as supported. In other words: end users cannot use it, but
+	// testprotos inside Go Protobuf can.
+	MaximumKnown = descriptorpb.Edition_EDITION_2024
 )
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index fa790e0f..f3252985 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -32,6 +32,7 @@ const (
 	EditionProto2      Edition = 998
 	EditionProto3      Edition = 999
 	Edition2023        Edition = 1000
+	Edition2024        Edition = 1001
 	EditionUnsupported Edition = 100000
 )
 
@@ -77,28 +78,42 @@ type (
 		Locations SourceLocations
 	}
 
+	// EditionFeatures is a frequently-instantiated struct, so please take care
+	// to minimize padding when adding new fields to this struct (add them in
+	// the right place/order).
 	EditionFeatures struct {
+		// StripEnumPrefix determines if the plugin generates enum value
+		// constants as-is, with their prefix stripped, or both variants.
+		StripEnumPrefix int
+
 		// IsFieldPresence is true if field_presence is EXPLICIT
 		// https://protobuf.dev/editions/features/#field_presence
 		IsFieldPresence bool
+
 		// IsFieldPresence is true if field_presence is LEGACY_REQUIRED
 		// https://protobuf.dev/editions/features/#field_presence
 		IsLegacyRequired bool
+
 		// IsOpenEnum is true if enum_type is OPEN
 		// https://protobuf.dev/editions/features/#enum_type
 		IsOpenEnum bool
+
 		// IsPacked is true if repeated_field_encoding is PACKED
 		// https://protobuf.dev/editions/features/#repeated_field_encoding
 		IsPacked bool
+
 		// IsUTF8Validated is true if utf_validation is VERIFY
 		// https://protobuf.dev/editions/features/#utf8_validation
 		IsUTF8Validated bool
+
 		// IsDelimitedEncoded is true if message_encoding is DELIMITED
 		// https://protobuf.dev/editions/features/#message_encoding
 		IsDelimitedEncoded bool
+
 		// IsJSONCompliant is true if json_format is ALLOW
 		// https://protobuf.dev/editions/features/#json_format
 		IsJSONCompliant bool
+
 		// GenerateLegacyUnmarshalJSON determines if the plugin generates the
 		// UnmarshalJSON([]byte) error method for enums.
 		GenerateLegacyUnmarshalJSON bool
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index fd4d0c83..7611796e 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -32,6 +32,10 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeVarint(b)
 			b = b[m:]
 			parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
+		case genid.GoFeatures_StripEnumPrefix_field_number:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			parent.StripEnumPrefix = int(v)
 		default:
 			panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 7f67cbb6..09792d96 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -21,13 +21,30 @@ const (
 // Field names for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
+	GoFeatures_StripEnumPrefix_field_name         protoreflect.Name = "strip_enum_prefix"
 
 	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
+	GoFeatures_StripEnumPrefix_field_fullname         protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
 )
 
 // Field numbers for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
+	GoFeatures_StripEnumPrefix_field_number         protoreflect.FieldNumber = 3
+)
+
+// Full and short names for pb.GoFeatures.StripEnumPrefix.
+const (
+	GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
+	GoFeatures_StripEnumPrefix_enum_name     = "StripEnumPrefix"
+)
+
+// Enum values for pb.GoFeatures.StripEnumPrefix.
+const (
+	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value   = 0
+	GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value          = 1
+	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2
+	GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value         = 3
 )
 
 // Extension numbers
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index fb8e15e8..62a52a40 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
 const (
 	Major      = 1
 	Minor      = 35
-	Patch      = 1
+	Patch      = 2
 	PreRelease = ""
 )
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 8fbecb4f..69a05050 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -13,6 +13,8 @@
 package protodesc
 
 import (
+	"strings"
+
 	"google.golang.org/protobuf/internal/editionssupport"
 	"google.golang.org/protobuf/internal/errors"
 	"google.golang.org/protobuf/internal/filedesc"
@@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
 	default:
 		return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
 	}
-	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
-		return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
-	}
 	f.L1.Path = fd.GetName()
 	if f.L1.Path == "" {
 		return nil, errors.New("file path must be populated")
 	}
+	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
+		// Allow cmd/protoc-gen-go/testdata to use any edition for easier
+		// testing of upcoming edition features.
+		if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
+			return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
+		}
+	}
 	f.L1.Package = protoreflect.FullName(fd.GetPackage())
 	if !f.L1.Package.IsValid() && f.L1.Package != "" {
 		return nil, errors.New("invalid package: %q", f.L1.Package)
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 002e0047..d0aeab95 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -43,6 +43,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
 		return descriptorpb.Edition_EDITION_PROTO3
 	case filedesc.Edition2023:
 		return descriptorpb.Edition_EDITION_2023
+	case filedesc.Edition2024:
+		return descriptorpb.Edition_EDITION_2024
 	default:
 		panic(fmt.Sprintf("unknown value for edition: %v", ed))
 	}
@@ -127,6 +129,9 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp
 		if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil {
 			parentFS.GenerateLegacyUnmarshalJSON = *luje
 		}
+		if sep := goFeatures.StripEnumPrefix; sep != nil {
+			parentFS.StripEnumPrefix = int(*sep)
+		}
 	}
 
 	return parentFS
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index c7e860fc..5067b89e 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -18,13 +18,76 @@ import (
 	sync "sync"
 )
 
+type GoFeatures_StripEnumPrefix int32
+
+const (
+	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED   GoFeatures_StripEnumPrefix = 0
+	GoFeatures_STRIP_ENUM_PREFIX_KEEP          GoFeatures_StripEnumPrefix = 1
+	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2
+	GoFeatures_STRIP_ENUM_PREFIX_STRIP         GoFeatures_StripEnumPrefix = 3
+)
+
+// Enum value maps for GoFeatures_StripEnumPrefix.
+var (
+	GoFeatures_StripEnumPrefix_name = map[int32]string{
+		0: "STRIP_ENUM_PREFIX_UNSPECIFIED",
+		1: "STRIP_ENUM_PREFIX_KEEP",
+		2: "STRIP_ENUM_PREFIX_GENERATE_BOTH",
+		3: "STRIP_ENUM_PREFIX_STRIP",
+	}
+	GoFeatures_StripEnumPrefix_value = map[string]int32{
+		"STRIP_ENUM_PREFIX_UNSPECIFIED":   0,
+		"STRIP_ENUM_PREFIX_KEEP":          1,
+		"STRIP_ENUM_PREFIX_GENERATE_BOTH": 2,
+		"STRIP_ENUM_PREFIX_STRIP":         3,
+	}
+)
+
+func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix {
+	p := new(GoFeatures_StripEnumPrefix)
+	*p = x
+	return p
+}
+
+func (x GoFeatures_StripEnumPrefix) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor()
+}
+
+func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType {
+	return &file_google_protobuf_go_features_proto_enumTypes[0]
+}
+
+func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = GoFeatures_StripEnumPrefix(num)
+	return nil
+}
+
+// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead.
+func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0}
+}
+
 type GoFeatures struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
 	// Whether or not to generate the deprecated UnmarshalJSON method for enums.
-	LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"`
+	LegacyUnmarshalJsonEnum *bool                       `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"`
+	StripEnumPrefix         *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"`
 }
 
 func (x *GoFeatures) Reset() {
@@ -64,6 +127,13 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
 	return false
 }
 
+func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix {
+	if x != nil && x.StripEnumPrefix != nil {
+		return *x.StripEnumPrefix
+	}
+	return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED
+}
+
 var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
 	{
 		ExtendedType:  (*descriptorpb.FeatureSet)(nil),
@@ -88,7 +158,7 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{
 	0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f,
+	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x03, 0x0a, 0x0a, 0x47, 0x6f,
 	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
 	0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
 	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
@@ -101,14 +171,31 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{
 	0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
 	0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
 	0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
-	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12,
-	0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
+	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72,
+	0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72,
+	0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x07, 0x98,
+	0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55,
+	0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, 0x45, 0x45, 0x50, 0x18, 0x84, 0x07,
+	0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75,
+	0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69,
+	0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53,
+	0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58,
+	0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a,
+	0x0a, 0x16, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45,
+	0x46, 0x49, 0x58, 0x5f, 0x4b, 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54,
+	0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f,
+	0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12,
+	0x1b, 0x0a, 0x17, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52,
+	0x45, 0x46, 0x49, 0x58, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02,
+	0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18,
+	0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67,
+	0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
 }
 
 var (
@@ -123,19 +210,22 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
 	return file_google_protobuf_go_features_proto_rawDescData
 }
 
+var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
 var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
 var file_google_protobuf_go_features_proto_goTypes = []any{
-	(*GoFeatures)(nil),              // 0: pb.GoFeatures
-	(*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet
+	(GoFeatures_StripEnumPrefix)(0), // 0: pb.GoFeatures.StripEnumPrefix
+	(*GoFeatures)(nil),              // 1: pb.GoFeatures
+	(*descriptorpb.FeatureSet)(nil), // 2: google.protobuf.FeatureSet
 }
 var file_google_protobuf_go_features_proto_depIdxs = []int32{
-	1, // 0: pb.go:extendee -> google.protobuf.FeatureSet
-	0, // 1: pb.go:type_name -> pb.GoFeatures
-	2, // [2:2] is the sub-list for method output_type
-	2, // [2:2] is the sub-list for method input_type
-	1, // [1:2] is the sub-list for extension type_name
-	0, // [0:1] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
+	0, // 0: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix
+	2, // 1: pb.go:extendee -> google.protobuf.FeatureSet
+	1, // 2: pb.go:type_name -> pb.GoFeatures
+	3, // [3:3] is the sub-list for method output_type
+	3, // [3:3] is the sub-list for method input_type
+	2, // [2:3] is the sub-list for extension type_name
+	1, // [1:2] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_go_features_proto_init() }
@@ -148,13 +238,14 @@ func file_google_protobuf_go_features_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_google_protobuf_go_features_proto_rawDesc,
-			NumEnums:      0,
+			NumEnums:      1,
 			NumMessages:   1,
 			NumExtensions: 1,
 			NumServices:   0,
 		},
 		GoTypes:           file_google_protobuf_go_features_proto_goTypes,
 		DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs,
+		EnumInfos:         file_google_protobuf_go_features_proto_enumTypes,
 		MessageInfos:      file_google_protobuf_go_features_proto_msgTypes,
 		ExtensionInfos:    file_google_protobuf_go_features_proto_extTypes,
 	}.Build()
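
For context on the regenerated gofeaturespb code above, here is a minimal sketch (not part of the patch) that exercises only the new getter and enum helpers introduced by this hunk; the program itself is purely illustrative.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	// Set the new strip_enum_prefix field through the generated Enum() helper.
	f := &gofeaturespb.GoFeatures{
		StripEnumPrefix: gofeaturespb.GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH.Enum(),
	}

	// GetStripEnumPrefix falls back to STRIP_ENUM_PREFIX_UNSPECIFIED when the
	// field is unset, matching the generated getter added in this diff.
	fmt.Println(f.GetStripEnumPrefix()) // STRIP_ENUM_PREFIX_GENERATE_BOTH
	fmt.Println(new(gofeaturespb.GoFeatures).GetStripEnumPrefix()) // STRIP_ENUM_PREFIX_UNSPECIFIED
}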
diff --git a/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/auth.go b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/auth.go
new file mode 100644
index 00000000..8f815119
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/auth.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+type Auth struct {
+	// Auth Secret
+	Secret *Secret `json:"secret,omitempty"`
+
+	// AccessKey is the AWS access key ID.
+	AccessKey string `json:"accessKey,omitempty"`
+
+	// SecretKey is the AWS secret access key.
+	SecretKey string `json:"secretKey,omitempty"`
+}
+
+func (a *Auth) HasAuth() bool {
+	return a != nil && a.Secret != nil &&
+		a.Secret.Ref != nil && a.Secret.Ref.Name != ""
+}
+
+type Secret struct {
+	// Secret reference for SASL and SSL configurations.
+	Ref *SecretReference `json:"ref,omitempty"`
+}
+
+type SecretReference struct {
+	// Secret name.
+	Name string `json:"name"`
+}
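
As an illustration of the new Auth helper (a sketch, not part of the patch), the snippet below uses only the types defined above; the secret name and key values are placeholders.

package main

import (
	"fmt"

	"knative.dev/eventing/pkg/apis/common/integration/v1alpha1"
)

func main() {
	// HasAuth reports true only when a secret reference with a non-empty name is set.
	withSecret := &v1alpha1.Auth{
		Secret: &v1alpha1.Secret{Ref: &v1alpha1.SecretReference{Name: "aws-credentials"}},
	}
	keysOnly := &v1alpha1.Auth{AccessKey: "example-access-key", SecretKey: "example-secret-key"}

	fmt.Println(withSecret.HasAuth()) // true
	fmt.Println(keysOnly.HasAuth())   // false: inline keys without a secret reference
}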
diff --git a/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/aws.go b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/aws.go
new file mode 100644
index 00000000..7a77d57b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/aws.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+const (
+
+	// AwsAccessKey is the name of the expected key on the secret for accessing the actual AWS access key value.
+	AwsAccessKey = "aws.accessKey"
+	// AwsSecretKey is the name of the expected key on the secret for accessing the actual AWS secret key value.
+	AwsSecretKey = "aws.secretKey"
+)
+
+type AWSCommon struct {
+	// Common AWS connection settings (region and endpoint overrides) shared by the AWS integrations.
+	Region              string `json:"region,omitempty"`                 // AWS region
+	URIEndpointOverride string `json:"uriEndpointOverride,omitempty"`    // Override endpoint URI
+	OverrideEndpoint    bool   `json:"overrideEndpoint" default:"false"` // Override endpoint flag
+}
+
+type AWSS3 struct {
+	AWSCommon               `json:",inline"` // Embeds AWSCommon to inherit its fields in JSON
+	Arn                     string           `json:"arn,omitempty" camel:"CAMEL_KAMELET_AWS_S3_SOURCE_BUCKETNAMEORARN"` // S3 ARN
+	DeleteAfterRead         bool             `json:"deleteAfterRead" default:"true"`                                    // Auto-delete objects after reading
+	MoveAfterRead           bool             `json:"moveAfterRead" default:"false"`                                     // Move objects after reading
+	DestinationBucket       string           `json:"destinationBucket,omitempty"`                                       // Destination bucket for moved objects
+	DestinationBucketPrefix string           `json:"destinationBucketPrefix,omitempty"`                                 // Prefix for moved objects
+	DestinationBucketSuffix string           `json:"destinationBucketSuffix,omitempty"`                                 // Suffix for moved objects
+	AutoCreateBucket        bool             `json:"autoCreateBucket" default:"false"`                                  // Auto-create S3 bucket
+	Prefix                  string           `json:"prefix,omitempty"`                                                  // S3 bucket prefix for search
+	IgnoreBody              bool             `json:"ignoreBody" default:"false"`                                        // Ignore object body
+	ForcePathStyle          bool             `json:"forcePathStyle" default:"false"`                                    // Force path style for bucket access
+	Delay                   int              `json:"delay" default:"500"`                                               // Delay between polls in milliseconds
+	MaxMessagesPerPoll      int              `json:"maxMessagesPerPoll" default:"10"`                                   // Max messages to poll per request
+}
+
+type AWSSQS struct {
+	AWSCommon          `json:",inline"` // Embeds AWSCommon to inherit its fields in JSON
+	Arn                string           `json:"arn,omitempty" camel:"CAMEL_KAMELET_AWS_SQS_SOURCE_QUEUENAMEORARN"`               // SQS ARN
+	DeleteAfterRead    bool             `json:"deleteAfterRead" default:"true"`                                                  // Auto-delete messages after reading
+	AutoCreateQueue    bool             `json:"autoCreateQueue" default:"false"`                                                 // Auto-create SQS queue
+	Host               string           `json:"host" camel:"CAMEL_KAMELET_AWS_SQS_SOURCE_AMAZONAWSHOST" default:"amazonaws.com"` // AWS host
+	Protocol           string           `json:"protocol" default:"https"`                                                        // Communication protocol (http/https)
+	QueueURL           string           `json:"queueURL,omitempty"`                                                              // Full SQS queue URL
+	Greedy             bool             `json:"greedy" default:"false"`                                                          // Greedy scheduler
+	Delay              int              `json:"delay" default:"500"`                                                             // Delay between polls in milliseconds
+	MaxMessagesPerPoll int              `json:"maxMessagesPerPoll" default:"1"`                                                  // Max messages to return (1-10)
+	WaitTimeSeconds    int              `json:"waitTimeSeconds,omitempty"`                                                       // Wait time for messages
+	VisibilityTimeout  int              `json:"visibilityTimeout,omitempty"`                                                     // Visibility timeout in seconds
+}
+
+type AWSDDBStreams struct {
+	AWSCommon          `json:",inline"` // Embeds AWSCommon to inherit its fields in JSON
+	Table              string           `json:"table,omitempty"`                                    // The name of the DynamoDB table
+	StreamIteratorType string           `json:"streamIteratorType,omitempty" default:"FROM_LATEST"` // Defines where in the DynamoDB stream to start getting records
+	Delay              int              `json:"delay,omitempty" default:"500"`                      // Delay in milliseconds before the next poll from the database
+}
+
+type AWSSNS struct {
+	AWSCommon       `json:",inline"` // Embeds AWSCommon to inherit its fields in JSON
+	Arn             string           `json:"arn,omitempty" camel:"CAMEL_KAMELET_AWS_SNS_SINK_TOPICNAMEORARN"` // SNS ARN
+	AutoCreateTopic bool             `json:"autoCreateTopic" default:"false"`                                 // Auto-create SNS topic
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/doc.go b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/doc.go
new file mode 100644
index 00000000..3366df67
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+package v1alpha1
diff --git a/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..9cf35396
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,164 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSCommon) DeepCopyInto(out *AWSCommon) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCommon.
+func (in *AWSCommon) DeepCopy() *AWSCommon {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSCommon)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSDDBStreams) DeepCopyInto(out *AWSDDBStreams) {
+	*out = *in
+	out.AWSCommon = in.AWSCommon
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDDBStreams.
+func (in *AWSDDBStreams) DeepCopy() *AWSDDBStreams {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSDDBStreams)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSS3) DeepCopyInto(out *AWSS3) {
+	*out = *in
+	out.AWSCommon = in.AWSCommon
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSS3.
+func (in *AWSS3) DeepCopy() *AWSS3 {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSS3)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSSNS) DeepCopyInto(out *AWSSNS) {
+	*out = *in
+	out.AWSCommon = in.AWSCommon
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSNS.
+func (in *AWSSNS) DeepCopy() *AWSSNS {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSSNS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSSQS) DeepCopyInto(out *AWSSQS) {
+	*out = *in
+	out.AWSCommon = in.AWSCommon
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSQS.
+func (in *AWSSQS) DeepCopy() *AWSSQS {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSSQS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Auth) DeepCopyInto(out *Auth) {
+	*out = *in
+	if in.Secret != nil {
+		in, out := &in.Secret, &out.Secret
+		*out = new(Secret)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Auth.
+func (in *Auth) DeepCopy() *Auth {
+	if in == nil {
+		return nil
+	}
+	out := new(Auth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Secret) DeepCopyInto(out *Secret) {
+	*out = *in
+	if in.Ref != nil {
+		in, out := &in.Ref, &out.Ref
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
+func (in *Secret) DeepCopy() *Secret {
+	if in == nil {
+		return nil
+	}
+	out := new(Secret)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretReference) DeepCopyInto(out *SecretReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
+func (in *SecretReference) DeepCopy() *SecretReference {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretReference)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/register.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/register.go
index c6f3e98c..c891f7c7 100644
--- a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/register.go
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/register.go
@@ -47,6 +47,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 	scheme.AddKnownTypes(SchemeGroupVersion,
 		&EventPolicy{},
 		&EventPolicyList{},
+		&RequestReply{},
+		&RequestReplyList{},
 	)
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 	return nil
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_conversion.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_conversion.go
new file mode 100644
index 00000000..739e922c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+	"fmt"
+
+	"knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (ep *RequestReply) ConvertTo(ctx context.Context, obj apis.Convertible) error {
+	return fmt.Errorf("v1alpha1 is the highest known version, got: %T", obj)
+}
+
+// ConvertFrom implements apis.Convertible
+func (ep *RequestReply) ConvertFrom(ctx context.Context, obj apis.Convertible) error {
+	return fmt.Errorf("v1alpha1 is the highest known version, got: %T", obj)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_defaults.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_defaults.go
new file mode 100644
index 00000000..c05f915d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_defaults.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+
+	"k8s.io/utils/ptr"
+	"knative.dev/eventing/pkg/apis/feature"
+	"knative.dev/pkg/apis"
+)
+
+func (rr *RequestReply) SetDefaults(ctx context.Context) {
+	ctx = apis.WithinParent(ctx, rr.ObjectMeta)
+	rr.Spec.SetDefaults(ctx)
+}
+
+func (rrs *RequestReplySpec) SetDefaults(ctx context.Context) {
+	if rrs.Timeout == nil || *rrs.Timeout == "" {
+		rrs.Timeout = ptr.To(feature.FromContextOrDefaults(ctx).RequestReplyDefaultTimeout())
+	}
+
+	if rrs.CorrelationAttribute == "" {
+		rrs.CorrelationAttribute = "correlationid"
+	}
+
+	if rrs.ReplyAttribute == "" {
+		rrs.ReplyAttribute = "replyid"
+	}
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_lifecycle.go
new file mode 100644
index 00000000..67014229
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_lifecycle.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"knative.dev/pkg/apis"
+	v1 "knative.dev/pkg/apis/duck/v1"
+)
+
+var requestReplyCondSet = apis.NewLivingConditionSet(RequestReplyConditionIngress, RequestReplyConditionTriggers, RequestReplyConditionAddressable, RequestReplyConditionEventPoliciesReady)
+
+const (
+	RequestReplyConditionReady                                 = apis.ConditionReady
+	RequestReplyConditionIngress            apis.ConditionType = "IngressReady"
+	RequestReplyConditionTriggers           apis.ConditionType = "TriggersReady"
+	RequestReplyConditionAddressable        apis.ConditionType = "Addressable"
+	RequestReplyConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*RequestReply) GetConditionSet() apis.ConditionSet {
+	return requestReplyCondSet
+}
+
+func (*RequestReplyStatus) GetConditionSet() apis.ConditionSet {
+	return requestReplyCondSet
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (rr *RequestReplyStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+	return requestReplyCondSet.Manage(rr).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (rr *RequestReplyStatus) IsReady() bool {
+	return rr.GetTopLevelCondition().IsTrue()
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (rr *RequestReplyStatus) GetTopLevelCondition() *apis.Condition {
+	return requestReplyCondSet.Manage(rr).GetTopLevelCondition()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (rr *RequestReplyStatus) InitializeConditions() {
+	requestReplyCondSet.Manage(rr).InitializeConditions()
+}
+
+func (rr *RequestReplyStatus) SetAddress(address *v1.Addressable) {
+	rr.AddressStatus = v1.AddressStatus{
+		Address: address,
+	}
+
+	if address != nil && address.URL != nil {
+		rr.GetConditionSet().Manage(rr).MarkTrue(RequestReplyConditionAddressable)
+		rr.AddressStatus.Address.Name = &address.URL.Scheme
+	} else {
+		rr.GetConditionSet().Manage(rr).MarkFalse(RequestReplyConditionAddressable, "nil URL", "URL is nil")
+	}
+}
+
+func (rr *RequestReplyStatus) MarkTriggersReady() {
+	rr.GetConditionSet().Manage(rr).MarkTrue(RequestReplyConditionTriggers)
+}
+
+func (rr *RequestReplyStatus) MarkTriggersNotReadyWithReason(reason, messageFormat string, messageA ...interface{}) {
+	rr.GetConditionSet().Manage(rr).MarkUnknown(RequestReplyConditionTriggers, reason, messageFormat, messageA...)
+}
+
+func (rr *RequestReplyStatus) MarkIngressReady() {
+	rr.GetConditionSet().Manage(rr).MarkTrue(RequestReplyConditionIngress)
+}
+
+func (rr *RequestReplyStatus) MarkIngressNotReadyWithReason(reason, messageFormat string, messageA ...interface{}) {
+	rr.GetConditionSet().Manage(rr).MarkUnknown(RequestReplyConditionIngress, reason, messageFormat, messageA...)
+}
+
+func (rr *RequestReplyStatus) MarkEventPoliciesTrue() {
+	rr.GetConditionSet().Manage(rr).MarkTrue(RequestReplyConditionEventPoliciesReady)
+}
+
+func (rr *RequestReplyStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+	rr.GetConditionSet().Manage(rr).MarkTrueWithReason(RequestReplyConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (rr *RequestReplyStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+	rr.GetConditionSet().Manage(rr).MarkFalse(RequestReplyConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (rr *RequestReplyStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+	rr.GetConditionSet().Manage(rr).MarkUnknown(RequestReplyConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_types.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_types.go
new file mode 100644
index 00000000..3cb37e11
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_types.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"knative.dev/pkg/apis"
+	duckv1 "knative.dev/pkg/apis/duck/v1"
+	"knative.dev/pkg/kmeta"
+
+	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RequestReply represents a synchronous interface for sending events to and receiving events from a Broker.
+type RequestReply struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec defines the desired state of the RequestReply.
+	Spec RequestReplySpec `json:"spec,omitempty"`
+
+	// Status represents the current state of the RequestReply.
+	// This data may be out of date.
+	// +optional
+	Status RequestReplyStatus `json:"status,omitempty"`
+}
+
+var (
+	// Check that RequestReply can be validated, can be defaulted, and has immutable fields.
+	_ apis.Validatable = (*RequestReply)(nil)
+	_ apis.Defaultable = (*RequestReply)(nil)
+
+	// Check that RequestReply can return its spec untyped.
+	_ apis.HasSpec = (*RequestReply)(nil)
+
+	_ runtime.Object = (*RequestReply)(nil)
+
+	// Check that we can create OwnerReferences to a RequestReply.
+	_ kmeta.OwnerRefable = (*RequestReply)(nil)
+
+	// Check that the type conforms to the duck Knative Resource shape.
+	_ duckv1.KRShaped = (*RequestReply)(nil)
+)
+
+type RequestReplySpec struct {
+	// BrokerRef contains the reference to the broker the RequestReply sends events to.
+	BrokerRef duckv1.KReference `json:"brokerRef"`
+
+	CorrelationAttribute string `json:"correlationAttribute"`
+
+	ReplyAttribute string `json:"replyAttribute"`
+
+	Timeout *string `json:"timeout,omitempty"`
+
+	Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"`
+
+	Secrets []string `json:"secrets"`
+}
+
+// RequestReplyStatus represents the current state of a RequestReply.
+type RequestReplyStatus struct {
+	// inherits duck/v1 Status, which currently provides:
+	// * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+	// * Conditions - the latest available observations of a resource's current state.
+	duckv1.Status `json:",inline"`
+
+	// AddressStatus is the part where the RequestReply fulfills the Addressable contract.
+	// It exposes the endpoint as a URI to get events delivered.
+	// +optional
+	duckv1.AddressStatus `json:",inline"`
+
+	// AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this RequestReply.
+	// +optional
+	eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RequestReplyList is a collection of RequestReplies.
+type RequestReplyList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []RequestReply `json:"items"`
+}
+
+// GetGroupVersionKind returns GroupVersionKind for RequestReply.
+func (rr *RequestReply) GetGroupVersionKind() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("RequestReply")
+}
+
+// GetUntypedSpec returns the spec of the RequestReply.
+func (rr *RequestReply) GetUntypedSpec() interface{} {
+	return rr.Spec
+}
+
+// GetStatus retrieves the status of the RequestReply. Implements the KRShaped interface.
+func (rr *RequestReply) GetStatus() *duckv1.Status {
+	return &rr.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_validation.go
new file mode 100644
index 00000000..693c5a78
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_validation.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+	"strings"
+
+	"github.com/rickb777/date/period"
+
+	"knative.dev/pkg/apis"
+)
+
+func (rr *RequestReply) Validate(ctx context.Context) *apis.FieldError {
+	ctx = apis.WithinParent(ctx, rr.ObjectMeta)
+	return rr.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (rrs *RequestReplySpec) Validate(ctx context.Context) *apis.FieldError {
+	var errs *apis.FieldError
+
+	if ke := rrs.BrokerRef.Validate(ctx); ke != nil {
+		errs = errs.Also(ke.ViaField("brokerRef"))
+	}
+
+	if !strings.EqualFold(rrs.BrokerRef.Kind, "broker") {
+		errs = errs.Also(apis.ErrInvalidValue(rrs.BrokerRef.Kind, ".kind", "brokerRef kind must be Broker").ViaField("brokerRef"))
+	}
+
+	if rrs.BrokerRef.Namespace != "" {
+		errs = errs.Also(apis.ErrDisallowedFields("namespace").ViaField("brokerRef"))
+	}
+
+	if rrs.Delivery != nil {
+		if de := rrs.Delivery.Validate(ctx); de != nil {
+			errs = errs.Also(de.ViaField("delivery"))
+		}
+	}
+
+	if rrs.Timeout != nil {
+		timeout, err := period.Parse(*rrs.Timeout)
+		if err != nil || timeout.IsZero() || timeout.IsNegative() {
+			errs = errs.Also(apis.ErrInvalidValue(*rrs.Timeout, "timeout"))
+		}
+	}
+
+	if len(rrs.Secrets) == 0 {
+		errs = errs.Also(apis.ErrInvalidValue(rrs.Secrets, "secrets", "one or more secrets must be provided"))
+	}
+
+	if rrs.CorrelationAttribute == "" ||
+		rrs.CorrelationAttribute == "id" ||
+		rrs.CorrelationAttribute == "course" ||
+		rrs.CorrelationAttribute == "specversion" ||
+		rrs.CorrelationAttribute == "type" {
+		errs = errs.Also(apis.ErrInvalidValue(rrs.CorrelationAttribute, "correlationattribute", "correlationattribute must be non-empty and cannot be a core cloudevent attribute (id, type, specversion, source)"))
+	}
+
+	if rrs.ReplyAttribute == "" ||
+		rrs.ReplyAttribute == "id" ||
+		rrs.ReplyAttribute == "course" ||
+		rrs.ReplyAttribute == "specversion" ||
+		rrs.ReplyAttribute == "type" {
+		errs = errs.Also(apis.ErrInvalidValue(rrs.ReplyAttribute, "replyattribute", "replyattribute must be non-empty and cannot be a core cloudevent attribute (id, type, specversion, source)"))
+	}
+
+	return errs
+}
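
A rough sketch (not part of the patch) of how the RequestReplySpec validation above rejects reserved CloudEvents attributes; the broker reference and secret names are placeholders, and the exact FieldError text may differ.

package main

import (
	"context"
	"fmt"

	duckv1 "knative.dev/pkg/apis/duck/v1"

	"knative.dev/eventing/pkg/apis/eventing/v1alpha1"
)

func main() {
	spec := v1alpha1.RequestReplySpec{
		BrokerRef: duckv1.KReference{
			Kind:       "Broker",
			Name:       "default",
			APIVersion: "eventing.knative.dev/v1",
		},
		CorrelationAttribute: "id", // reserved CloudEvents attribute, rejected by Validate
		ReplyAttribute:       "replyid",
		Secrets:              []string{"request-reply-keys"},
	}

	// Validate aggregates field errors; the reserved correlation attribute above
	// produces an invalid-value error on correlationattribute.
	if err := spec.Validate(context.Background()); err != nil {
		fmt.Println(err)
	}
}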
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
index 068369c5..4a30bba4 100644
--- a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
@@ -24,6 +24,7 @@ package v1alpha1
 import (
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
+	duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
 	eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
 )
 
@@ -256,3 +257,115 @@ func (in *EventPolicyToReference) DeepCopy() *EventPolicyToReference {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestReply) DeepCopyInto(out *RequestReply) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestReply.
+func (in *RequestReply) DeepCopy() *RequestReply {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestReply)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RequestReply) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestReplyList) DeepCopyInto(out *RequestReplyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]RequestReply, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestReplyList.
+func (in *RequestReplyList) DeepCopy() *RequestReplyList {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestReplyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RequestReplyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestReplySpec) DeepCopyInto(out *RequestReplySpec) {
+	*out = *in
+	in.BrokerRef.DeepCopyInto(&out.BrokerRef)
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(string)
+		**out = **in
+	}
+	if in.Delivery != nil {
+		in, out := &in.Delivery, &out.Delivery
+		*out = new(duckv1.DeliverySpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Secrets != nil {
+		in, out := &in.Secrets, &out.Secrets
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestReplySpec.
+func (in *RequestReplySpec) DeepCopy() *RequestReplySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestReplySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestReplyStatus) DeepCopyInto(out *RequestReplyStatus) {
+	*out = *in
+	in.Status.DeepCopyInto(&out.Status)
+	in.AddressStatus.DeepCopyInto(&out.AddressStatus)
+	in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestReplyStatus.
+func (in *RequestReplyStatus) DeepCopy() *RequestReplyStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(RequestReplyStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/feature/features.go b/vendor/knative.dev/eventing/pkg/apis/feature/features.go
index 98eeb290..cfe37914 100644
--- a/vendor/knative.dev/eventing/pkg/apis/feature/features.go
+++ b/vendor/knative.dev/eventing/pkg/apis/feature/features.go
@@ -67,6 +67,10 @@ const (
 
 	// DefaultOIDCDiscoveryURL is the default OIDC Discovery URL used in most Kubernetes clusters.
 	DefaultOIDCDiscoveryBaseURL Flag = "https://kubernetes.default.svc"
+
+	// DefaultRequestReplyTimeout is the default value for the RequestReplyDefaultTimeout flag:
+	// a RequestReply resource times out after 30 seconds unless configured otherwise.
+	DefaultRequestReplyTimeout Flag = "30s"
 )
 
 // Flags is a map containing all the enabled/disabled flags for the experimental features.
@@ -75,16 +79,17 @@ type Flags map[string]Flag
 
 func newDefaults() Flags {
 	return map[string]Flag{
-		KReferenceGroup:          Disabled,
-		DeliveryRetryAfter:       Disabled,
-		DeliveryTimeout:          Enabled,
-		KReferenceMapping:        Disabled,
-		TransportEncryption:      Disabled,
-		OIDCAuthentication:       Disabled,
-		EvenTypeAutoCreate:       Disabled,
-		NewAPIServerFilters:      Disabled,
-		AuthorizationDefaultMode: AuthorizationAllowSameNamespace,
-		OIDCDiscoveryBaseURL:     DefaultOIDCDiscoveryBaseURL,
+		KReferenceGroup:            Disabled,
+		DeliveryRetryAfter:         Disabled,
+		DeliveryTimeout:            Enabled,
+		KReferenceMapping:          Disabled,
+		TransportEncryption:        Disabled,
+		OIDCAuthentication:         Disabled,
+		EvenTypeAutoCreate:         Disabled,
+		NewAPIServerFilters:        Disabled,
+		AuthorizationDefaultMode:   AuthorizationAllowSameNamespace,
+		OIDCDiscoveryBaseURL:       DefaultOIDCDiscoveryBaseURL,
+		RequestReplyDefaultTimeout: DefaultRequestReplyTimeout,
 	}
 }
 
@@ -151,6 +156,19 @@ func (e Flags) OIDCDiscoveryBaseURL() string {
 	return string(discoveryUrl)
 }
 
+func (e Flags) RequestReplyDefaultTimeout() string {
+	if e == nil {
+		return string(DefaultRequestReplyTimeout)
+	}
+
+	timeout, ok := e[RequestReplyDefaultTimeout]
+	if !ok {
+		return string(DefaultRequestReplyTimeout)
+	}
+
+	return string(timeout)
+}
+
 func (e Flags) String() string {
 	return fmt.Sprintf("%+v", map[string]Flag(e))
 }
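
To show the fallback behaviour of the new feature-flag accessor above (a sketch, not part of the patch); the override value is a placeholder and its expected format follows whatever the RequestReply timeout validation accepts.

package main

import (
	"fmt"

	"knative.dev/eventing/pkg/apis/feature"
)

func main() {
	// A nil or incomplete Flags map falls back to the 30s default added in this diff.
	var unset feature.Flags
	fmt.Println(unset.RequestReplyDefaultTimeout()) // "30s"

	// An explicit entry under the requestreply-default-timeout key takes precedence.
	configured := feature.Flags{feature.RequestReplyDefaultTimeout: feature.Flag("PT10S")}
	fmt.Println(configured.RequestReplyDefaultTimeout()) // "PT10S"
}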
diff --git a/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go b/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go
index e21056eb..ba163868 100644
--- a/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go
+++ b/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go
@@ -17,16 +17,17 @@ limitations under the License.
 package feature
 
 const (
-	KReferenceGroup          = "kreference-group"
-	DeliveryRetryAfter       = "delivery-retryafter"
-	DeliveryTimeout          = "delivery-timeout"
-	KReferenceMapping        = "kreference-mapping"
-	TransportEncryption      = "transport-encryption"
-	EvenTypeAutoCreate       = "eventtype-auto-create"
-	OIDCAuthentication       = "authentication-oidc"
-	NodeSelectorLabel        = "apiserversources-nodeselector-"
-	CrossNamespaceEventLinks = "cross-namespace-event-links"
-	NewAPIServerFilters      = "new-apiserversource-filters"
-	AuthorizationDefaultMode = "default-authorization-mode"
-	OIDCDiscoveryBaseURL     = "oidc-discovery-base-url"
+	KReferenceGroup            = "kreference-group"
+	DeliveryRetryAfter         = "delivery-retryafter"
+	DeliveryTimeout            = "delivery-timeout"
+	KReferenceMapping          = "kreference-mapping"
+	TransportEncryption        = "transport-encryption"
+	EvenTypeAutoCreate         = "eventtype-auto-create"
+	OIDCAuthentication         = "authentication-oidc"
+	NodeSelectorLabel          = "apiserversources-nodeselector-"
+	CrossNamespaceEventLinks   = "cross-namespace-event-links"
+	NewAPIServerFilters        = "new-apiserversource-filters"
+	AuthorizationDefaultMode   = "default-authorization-mode"
+	OIDCDiscoveryBaseURL       = "oidc-discovery-base-url"
+	RequestReplyDefaultTimeout = "requestreply-default-timeout"
 )
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go
index d45d1a97..35ef3988 100644
--- a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go
@@ -19,10 +19,11 @@ package v1
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
 	"knative.dev/pkg/apis"
 	duckv1 "knative.dev/pkg/apis/duck/v1"
 	"knative.dev/pkg/kmeta"
+
+	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
 )
 
 // +genclient
@@ -44,6 +45,14 @@ type InMemoryChannel struct {
 	Status InMemoryChannelStatus `json:"status,omitempty"`
 }
 
+var (
+	// AsyncHandlerAnnotation controls whether InMemoryChannel uses the async handler.
+	//
+	// Async handler is subject to event loss since it responds with 200 before forwarding the event
+	// to all subscriptions.
+	AsyncHandlerAnnotation = SchemeGroupVersion.Group + "/async-handler"
+)
+
 var (
 	// Check that InMemoryChannel can be validated and defaulted.
 	_ apis.Validatable = (*InMemoryChannel)(nil)
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/register.go b/vendor/knative.dev/eventing/pkg/apis/sinks/register.go
index 676fa75e..1994d579 100644
--- a/vendor/knative.dev/eventing/pkg/apis/sinks/register.go
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/register.go
@@ -33,6 +33,12 @@ var (
 		Group:    GroupName,
 		Resource: "jobsinks",
 	}
+
+	// IntegrationSinkResource represents the Knative Eventing IntegrationSink resource.
+	IntegrationSinkResource = schema.GroupResource{
+		Group:    GroupName,
+		Resource: "integrationsinks",
+	}
 )
 
 type Config struct {
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_conversion.go
new file mode 100644
index 00000000..8f41d2da
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_conversion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+	"fmt"
+
+	"knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+// Converts source from v1alpha1.IntegrationSink into a higher version.
+func (sink *IntegrationSink) ConvertTo(ctx context.Context, obj apis.Convertible) error {
+	return fmt.Errorf("v1alpha1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+// Converts obj from a higher version into v1alpha1.IntegrationSink.
+func (sink *IntegrationSink) ConvertFrom(ctx context.Context, obj apis.Convertible) error {
+	return fmt.Errorf("v1alpha1 is the highest known version, got: %T", sink)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_defaults.go
new file mode 100644
index 00000000..f77df267
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_defaults.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "context"
+
+func (sink *IntegrationSink) SetDefaults(ctx context.Context) {
+	sink.Spec.SetDefaults(ctx)
+}
+
+func (sink *IntegrationSinkSpec) SetDefaults(ctx context.Context) {
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_lifecycle.go
new file mode 100644
index 00000000..1ad33e2c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_lifecycle.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"knative.dev/pkg/apis"
+	duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+const (
+	// IntegrationSinkConditionReady has status True when the IntegrationSink is ready to send events.
+	IntegrationSinkConditionReady = apis.ConditionReady
+
+	IntegrationSinkConditionAddressable apis.ConditionType = "Addressable"
+
+	// IntegrationSinkConditionDeploymentReady has status True when the IntegrationSink has been configured with a deployment.
+	IntegrationSinkConditionDeploymentReady apis.ConditionType = "DeploymentReady"
+
+	// IntegrationSinkConditionEventPoliciesReady has status True when all the applying EventPolicies for this
+	// IntegrationSink are ready.
+	IntegrationSinkConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
+)
+
+var IntegrationSinkCondSet = apis.NewLivingConditionSet(
+	IntegrationSinkConditionAddressable,
+	IntegrationSinkConditionDeploymentReady,
+	IntegrationSinkConditionEventPoliciesReady,
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*IntegrationSink) GetConditionSet() apis.ConditionSet {
+	return IntegrationSinkCondSet
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *IntegrationSinkStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+	return IntegrationSinkCondSet.Manage(s).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (ps *IntegrationSinkStatus) GetTopLevelCondition() *apis.Condition {
+	return IntegrationSinkCondSet.Manage(ps).GetTopLevelCondition()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *IntegrationSinkStatus) IsReady() bool {
+	return IntegrationSinkCondSet.Manage(s).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *IntegrationSinkStatus) InitializeConditions() {
+	IntegrationSinkCondSet.Manage(s).InitializeConditions()
+}
+
+// MarkAddressableReady marks the Addressable condition to True.
+func (s *IntegrationSinkStatus) MarkAddressableReady() {
+	IntegrationSinkCondSet.Manage(s).MarkTrue(IntegrationSinkConditionAddressable)
+}
+
+// MarkEventPoliciesFailed marks the EventPoliciesReady condition to False with the given reason and message.
+func (s *IntegrationSinkStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+	IntegrationSinkCondSet.Manage(s).MarkFalse(IntegrationSinkConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+// MarkEventPoliciesUnknown marks the EventPoliciesReady condition to Unknown with the given reason and message.
+func (s *IntegrationSinkStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+	IntegrationSinkCondSet.Manage(s).MarkUnknown(IntegrationSinkConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+// MarkEventPoliciesTrue marks the EventPoliciesReady condition to True.
+func (s *IntegrationSinkStatus) MarkEventPoliciesTrue() {
+	IntegrationSinkCondSet.Manage(s).MarkTrue(IntegrationSinkConditionEventPoliciesReady)
+}
+
+// MarkEventPoliciesTrueWithReason marks the EventPoliciesReady condition to True with the given reason and message.
+func (s *IntegrationSinkStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+	IntegrationSinkCondSet.Manage(s).MarkTrueWithReason(IntegrationSinkConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (s *IntegrationSinkStatus) PropagateDeploymentStatus(d *appsv1.DeploymentStatus) {
+	deploymentAvailableFound := false
+	for _, cond := range d.Conditions {
+		if cond.Type == appsv1.DeploymentAvailable {
+			deploymentAvailableFound = true
+			if cond.Status == corev1.ConditionTrue {
+				IntegrationSinkCondSet.Manage(s).MarkTrue(IntegrationSinkConditionDeploymentReady)
+			} else if cond.Status == corev1.ConditionFalse {
+				IntegrationSinkCondSet.Manage(s).MarkFalse(IntegrationSinkConditionDeploymentReady, cond.Reason, cond.Message)
+			} else if cond.Status == corev1.ConditionUnknown {
+				IntegrationSinkCondSet.Manage(s).MarkUnknown(IntegrationSinkConditionDeploymentReady, cond.Reason, cond.Message)
+			}
+		}
+	}
+	if !deploymentAvailableFound {
+		IntegrationSinkCondSet.Manage(s).MarkUnknown(IntegrationSinkConditionDeploymentReady, "DeploymentUnavailable", "The Deployment '%s' is unavailable.", d)
+	}
+}
+
+// SetAddress records the address and marks the Addressable condition accordingly.
+func (s *IntegrationSinkStatus) SetAddress(address *duckv1.Addressable) {
+	s.Address = address
+	if address == nil || address.URL.IsEmpty() {
+		IntegrationSinkCondSet.Manage(s).MarkFalse(IntegrationSinkConditionAddressable, "EmptyHostname", "hostname is the empty string")
+	} else {
+		IntegrationSinkCondSet.Manage(s).MarkTrue(IntegrationSinkConditionAddressable)
+
+	}
+}
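For illustration only (not part of the vendored patch): a minimal sketch of how a reconciler might drive the IntegrationSinkStatus helpers above. The import paths match this patch; the URL and Deployment status values are made up for the example.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"knative.dev/pkg/apis"
	duckv1 "knative.dev/pkg/apis/duck/v1"

	sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
)

func main() {
	status := &sinksv1alpha1.IntegrationSinkStatus{}
	status.InitializeConditions()

	// Mark the sink addressable once its URL is known (hypothetical URL).
	status.SetAddress(&duckv1.Addressable{
		URL: apis.HTTP("integration-sink.default.svc.cluster.local"),
	})

	// Propagate the backing Deployment's Available condition.
	status.PropagateDeploymentStatus(&appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{{
			Type:   appsv1.DeploymentAvailable,
			Status: corev1.ConditionTrue,
		}},
	})

	// No EventPolicies apply in this sketch, so mark that condition True directly.
	status.MarkEventPoliciesTrue()

	fmt.Println("ready:", status.IsReady()) // true once all three dependent conditions are True
}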
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_types.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_types.go
new file mode 100644
index 00000000..efc15e62
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_types.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"knative.dev/eventing/pkg/apis/common/integration/v1alpha1"
+	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+	"knative.dev/pkg/apis"
+	duckv1 "knative.dev/pkg/apis/duck/v1"
+	"knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// IntegrationSink is the Schema for the IntegrationSink API.
+type IntegrationSink struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   IntegrationSinkSpec   `json:"spec,omitempty"`
+	Status IntegrationSinkStatus `json:"status,omitempty"`
+}
+
+// Check the interfaces that IntegrationSink should be implementing.
+var (
+	_ runtime.Object     = (*IntegrationSink)(nil)
+	_ kmeta.OwnerRefable = (*IntegrationSink)(nil)
+	_ apis.Validatable   = (*IntegrationSink)(nil)
+	_ apis.Defaultable   = (*IntegrationSink)(nil)
+	_ apis.HasSpec       = (*IntegrationSink)(nil)
+	_ duckv1.KRShaped    = (*IntegrationSink)(nil)
+	_ apis.Convertible   = (*JobSink)(nil)
+)
+
+type IntegrationSinkSpec struct {
+	Aws *Aws `json:"aws,omitempty"` // AWS sink configuration
+	Log *Log `json:"log,omitempty"` // Log sink configuration
+}
+
+type Log struct {
+	LoggerName          string `json:"loggerName,omitempty" default:"log-sink"`      // Name of the logging category to use
+	Level               string `json:"level,omitempty" default:"INFO"`               // Logging level to use
+	LogMask             bool   `json:"logMask,omitempty" default:"false"`            // Mask sensitive information in the log
+	Marker              string `json:"marker,omitempty"`                             // An optional Marker name to use
+	Multiline           bool   `json:"multiline,omitempty" default:"false"`          // If enabled, outputs each piece of information on a new line
+	ShowAllProperties   bool   `json:"showAllProperties,omitempty" default:"false"`  // Show all of the exchange properties (both internal and custom)
+	ShowBody            bool   `json:"showBody,omitempty" default:"true"`            // Show the message body
+	ShowBodyType        bool   `json:"showBodyType,omitempty" default:"true"`        // Show the body Java type
+	ShowExchangePattern bool   `json:"showExchangePattern,omitempty" default:"true"` // Show the Message Exchange Pattern (MEP)
+	ShowHeaders         bool   `json:"showHeaders,omitempty" default:"false"`        // Show the headers received
+	ShowProperties      bool   `json:"showProperties,omitempty" default:"false"`     // Show the exchange properties (only custom)
+	ShowStreams         bool   `json:"showStreams,omitempty" default:"false"`        // Show the stream bodies
+	ShowCachedStreams   bool   `json:"showCachedStreams,omitempty" default:"true"`   // Show cached stream bodies
+}
+
+type Aws struct {
+	S3   *v1alpha1.AWSS3  `json:"s3,omitempty"`  // S3 sink configuration
+	SQS  *v1alpha1.AWSSQS `json:"sqs,omitempty"` // SQS sink configuration
+	SNS  *v1alpha1.AWSSNS `json:"sns,omitempty"` // SNS sink configuration
+	Auth *v1alpha1.Auth   `json:"auth,omitempty"`
+}
+
+type IntegrationSinkStatus struct {
+	duckv1.Status `json:",inline"`
+
+	// AddressStatus is the part where the IntegrationSink fulfills the Addressable contract.
+	// It exposes the endpoint as a URI to get events delivered.
+	// +optional
+	duckv1.AddressStatus `json:",inline"`
+
+	// AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this IntegrationSink
+	// +optional
+	eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (*IntegrationSink) GetGroupVersionKind() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("IntegrationSink")
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IntegrationSinkList contains a list of IntegrationSink
+type IntegrationSinkList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []IntegrationSink `json:"items"`
+}
+
+// GetUntypedSpec returns the spec of the IntegrationSink.
+func (c *IntegrationSink) GetUntypedSpec() interface{} {
+	return c.Spec
+}
+
+// GetStatus retrieves the status of the IntegrationSink. Implements the KRShaped interface.
+func (c *IntegrationSink) GetStatus() *duckv1.Status {
+	return &c.Status.Status
+}
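For orientation only (not part of the patch): a minimal sketch of constructing an IntegrationSink with a Log sink using the types defined in this file; the object name and namespace are arbitrary.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
)

func main() {
	sink := &sinksv1alpha1.IntegrationSink{
		ObjectMeta: metav1.ObjectMeta{Name: "log-sink", Namespace: "default"},
		Spec: sinksv1alpha1.IntegrationSinkSpec{
			Log: &sinksv1alpha1.Log{
				LoggerName:  "log-sink",
				Level:       "INFO",
				ShowHeaders: true,
			},
		},
	}

	// GVK and the untyped spec come from the accessors defined in this file.
	fmt.Println(sink.GetGroupVersionKind())
	fmt.Printf("%+v\n", sink.GetUntypedSpec())
}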
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_validation.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_validation.go
new file mode 100644
index 00000000..7f24d9ca
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_validation.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+
+	"knative.dev/pkg/apis"
+)
+
+func (sink *IntegrationSink) Validate(ctx context.Context) *apis.FieldError {
+	ctx = apis.WithinParent(ctx, sink.ObjectMeta)
+	return sink.Spec.Validate(ctx).ViaField("spec")
+}
+
+// Validate ensures exactly one sink type is configured and that any AWS sink carries the required auth, ARN, and region fields.
+func (spec *IntegrationSinkSpec) Validate(ctx context.Context) *apis.FieldError {
+	var errs *apis.FieldError
+
+	// Count how many fields are set to ensure mutual exclusivity
+	sinkSetCount := 0
+	if spec.Log != nil {
+		sinkSetCount++
+	}
+	if spec.Aws != nil {
+		if spec.Aws.S3 != nil {
+			sinkSetCount++
+		}
+		if spec.Aws.SQS != nil {
+			sinkSetCount++
+		}
+		if spec.Aws.SNS != nil {
+			sinkSetCount++
+		}
+	}
+
+	// Validate that only one sink field is set
+	if sinkSetCount > 1 {
+		errs = errs.Also(apis.ErrGeneric("only one sink type can be set", "spec"))
+	} else if sinkSetCount == 0 {
+		errs = errs.Also(apis.ErrGeneric("at least one sink type must be specified", "spec"))
+	}
+
+	// Only perform AWS-specific validation if exactly one AWS sink is configured
+	if sinkSetCount == 1 && spec.Aws != nil {
+		if spec.Aws.S3 != nil || spec.Aws.SQS != nil || spec.Aws.SNS != nil {
+			// Check that AWS Auth is properly configured
+			if !spec.Aws.Auth.HasAuth() {
+				errs = errs.Also(apis.ErrMissingField("aws.auth.secret.ref.name"))
+			}
+		}
+
+		// Additional validation for AWS S3 required fields
+		if spec.Aws.S3 != nil {
+			if spec.Aws.S3.Arn == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.s3.arn"))
+			}
+			if spec.Aws.S3.Region == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.s3.region"))
+			}
+		}
+
+		// Additional validation for AWS SQS required fields
+		if spec.Aws.SQS != nil {
+			if spec.Aws.SQS.Arn == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.sqs.arn"))
+			}
+			if spec.Aws.SQS.Region == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.sqs.region"))
+			}
+		}
+		// Additional validation for AWS SNS required fields
+		if spec.Aws.SNS != nil {
+			if spec.Aws.SNS.Arn == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.sns.arn"))
+			}
+			if spec.Aws.SNS.Region == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.sns.region"))
+			}
+		}
+	}
+
+	return errs
+}
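Illustrative only: a sketch of how the mutual-exclusivity check above surfaces when both a Log and an AWS S3 sink are set; the AWSS3 type comes from the common integration package referenced elsewhere in this patch.

package main

import (
	"context"
	"fmt"

	integrationv1alpha1 "knative.dev/eventing/pkg/apis/common/integration/v1alpha1"
	sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
)

func main() {
	// Two sink types at once: Validate should report that only one may be set.
	spec := &sinksv1alpha1.IntegrationSinkSpec{
		Log: &sinksv1alpha1.Log{},
		Aws: &sinksv1alpha1.Aws{S3: &integrationv1alpha1.AWSS3{}},
	}
	if err := spec.Validate(context.Background()); err != nil {
		fmt.Println(err) // expected to include: only one sink type can be set: spec
	}
}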
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go
index 13f62e86..3bd18fbf 100644
--- a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go
@@ -18,7 +18,48 @@ package v1alpha1
 
 import (
 	"context"
+
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
 )
 
 func (sink *JobSink) SetDefaults(ctx context.Context) {
+	if sink.Spec.Job != nil {
+		setBatchJobDefaults(sink.Spec.Job)
+	}
+}
+
+// setBatchJobDefaults ensures every container and init container in the Job template
+// carries the K_EXECUTION_MODE environment variable, defaulting it to "batch".
+func setBatchJobDefaults(job *batchv1.Job) {
+	for i := range job.Spec.Template.Spec.Containers {
+		executionModeFound := false
+		for j := range job.Spec.Template.Spec.Containers[i].Env {
+			if job.Spec.Template.Spec.Containers[i].Env[j].Name == ExecutionModeEnvVar {
+				executionModeFound = true
+				break
+			}
+		}
+		if executionModeFound {
+			continue
+		}
+		job.Spec.Template.Spec.Containers[i].Env = append(job.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{
+			Name:  ExecutionModeEnvVar,
+			Value: string(ExecutionModeBatch),
+		})
+	}
+	for i := range job.Spec.Template.Spec.InitContainers {
+		executionModeFound := false
+		for j := range job.Spec.Template.Spec.InitContainers[i].Env {
+			if job.Spec.Template.Spec.InitContainers[i].Env[j].Name == ExecutionModeEnvVar {
+				executionModeFound = true
+				break
+			}
+		}
+		if executionModeFound {
+			continue
+		}
+		job.Spec.Template.Spec.InitContainers[i].Env = append(job.Spec.Template.Spec.InitContainers[i].Env, corev1.EnvVar{
+			Name:  ExecutionModeEnvVar,
+			Value: string(ExecutionModeBatch),
+		})
+	}
 }
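Illustrative only: a sketch of the defaulting behavior added above, following the sink.Spec.Job usage in SetDefaults. After SetDefaults, each container without K_EXECUTION_MODE has it set to "batch".

package main

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"

	sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
)

func main() {
	sink := &sinksv1alpha1.JobSink{
		Spec: sinksv1alpha1.JobSinkSpec{
			Job: &batchv1.Job{
				Spec: batchv1.JobSpec{
					Template: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							Containers: []corev1.Container{{Name: "job"}},
						},
					},
				},
			},
		},
	}

	sink.SetDefaults(context.Background())

	// The container now carries K_EXECUTION_MODE=batch, since it had no such env var.
	fmt.Println(sink.Spec.Job.Spec.Template.Spec.Containers[0].Env)
}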
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go
index 857e9f79..5cc3a0fa 100644
--- a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go
@@ -22,9 +22,20 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
 	duckv1 "knative.dev/pkg/apis/duck/v1"
 	"knative.dev/pkg/kmeta"
+
+	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+const (
+	ExecutionModeEnvVar = "K_EXECUTION_MODE"
+)
+
+type ExecutionMode string
+
+const (
+	ExecutionModeBatch ExecutionMode = "batch"
 )
 
 // +genclient
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go
index 827ebc28..89e4f3fb 100644
--- a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go
@@ -47,6 +47,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 	scheme.AddKnownTypes(SchemeGroupVersion,
 		&JobSink{},
 		&JobSinkList{},
+		&IntegrationSink{},
+		&IntegrationSinkList{},
 	)
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 	return nil
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go
index 58c9fdfa..a0265e79 100644
--- a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go
@@ -24,8 +24,151 @@ package v1alpha1
 import (
 	v1 "k8s.io/api/batch/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
+	integrationv1alpha1 "knative.dev/eventing/pkg/apis/common/integration/v1alpha1"
 )
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Aws) DeepCopyInto(out *Aws) {
+	*out = *in
+	if in.S3 != nil {
+		in, out := &in.S3, &out.S3
+		*out = new(integrationv1alpha1.AWSS3)
+		**out = **in
+	}
+	if in.SQS != nil {
+		in, out := &in.SQS, &out.SQS
+		*out = new(integrationv1alpha1.AWSSQS)
+		**out = **in
+	}
+	if in.SNS != nil {
+		in, out := &in.SNS, &out.SNS
+		*out = new(integrationv1alpha1.AWSSNS)
+		**out = **in
+	}
+	if in.Auth != nil {
+		in, out := &in.Auth, &out.Auth
+		*out = new(integrationv1alpha1.Auth)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Aws.
+func (in *Aws) DeepCopy() *Aws {
+	if in == nil {
+		return nil
+	}
+	out := new(Aws)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntegrationSink) DeepCopyInto(out *IntegrationSink) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSink.
+func (in *IntegrationSink) DeepCopy() *IntegrationSink {
+	if in == nil {
+		return nil
+	}
+	out := new(IntegrationSink)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IntegrationSink) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntegrationSinkList) DeepCopyInto(out *IntegrationSinkList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]IntegrationSink, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSinkList.
+func (in *IntegrationSinkList) DeepCopy() *IntegrationSinkList {
+	if in == nil {
+		return nil
+	}
+	out := new(IntegrationSinkList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IntegrationSinkList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntegrationSinkSpec) DeepCopyInto(out *IntegrationSinkSpec) {
+	*out = *in
+	if in.Aws != nil {
+		in, out := &in.Aws, &out.Aws
+		*out = new(Aws)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Log != nil {
+		in, out := &in.Log, &out.Log
+		*out = new(Log)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSinkSpec.
+func (in *IntegrationSinkSpec) DeepCopy() *IntegrationSinkSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IntegrationSinkSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntegrationSinkStatus) DeepCopyInto(out *IntegrationSinkStatus) {
+	*out = *in
+	in.Status.DeepCopyInto(&out.Status)
+	in.AddressStatus.DeepCopyInto(&out.AddressStatus)
+	in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSinkStatus.
+func (in *IntegrationSinkStatus) DeepCopy() *IntegrationSinkStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IntegrationSinkStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *JobSink) DeepCopyInto(out *JobSink) {
 	*out = *in
@@ -143,3 +286,19 @@ func (in *JobStatus) DeepCopy() *JobStatus {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Log) DeepCopyInto(out *Log) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Log.
+func (in *Log) DeepCopy() *Log {
+	if in == nil {
+		return nil
+	}
+	out := new(Log)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/register.go b/vendor/knative.dev/eventing/pkg/apis/sources/register.go
index 04716c8c..3571d0a1 100644
--- a/vendor/knative.dev/eventing/pkg/apis/sources/register.go
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/register.go
@@ -56,4 +56,10 @@ var (
 		Group:    GroupName,
 		Resource: "containersources",
 	}
+
+	// IntegrationSourceResource represents a Knative Eventing Sources IntegrationSource
+	IntegrationSourceResource = schema.GroupResource{
+		Group:    GroupName,
+		Resource: "integrationsources",
+	}
 )
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/doc.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/doc.go
new file mode 100644
index 00000000..76cd299b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 contains API Schema definitions for the sources v1alpha1 API group.
+// +k8s:deepcopy-gen=package
+// +groupName=sources.knative.dev
+package v1alpha1
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_conversion.go
new file mode 100644
index 00000000..32bfc7cc
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_conversion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+	"fmt"
+
+	"knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+// Converts source from v1alpha1.IntegrationSource into a higher version.
+func (source *IntegrationSource) ConvertTo(ctx context.Context, obj apis.Convertible) error {
+	return fmt.Errorf("v1alpha1 is the highest known version, got: %T", source)
+}
+
+// ConvertFrom implements apis.Convertible
+// Converts source from a higher version into v1alpha1.IntegrationSource
+func (source *IntegrationSource) ConvertFrom(ctx context.Context, obj apis.Convertible) error {
+	return fmt.Errorf("v1alpha1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_defaults.go
new file mode 100644
index 00000000..70d82c73
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_defaults.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "context"
+
+func (source *IntegrationSource) SetDefaults(ctx context.Context) {
+	source.Spec.SetDefaults(ctx)
+}
+
+func (source *IntegrationSourceSpec) SetDefaults(ctx context.Context) {
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_lifecycle.go
new file mode 100644
index 00000000..cc1270fb
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_lifecycle.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	v1 "knative.dev/eventing/pkg/apis/sources/v1"
+	"knative.dev/pkg/apis"
+)
+
+const (
+	// IntegrationSourceConditionReady has status True when the IntegrationSource is ready to send events.
+	IntegrationSourceConditionReady = apis.ConditionReady
+
+	// IntegrationSourceConditionContainerSourceReady has status True when the IntegrationSource's ContainerSource is ready.
+	IntegrationSourceConditionContainerSourceReady apis.ConditionType = "ContainerSourceReady"
+)
+
+var IntegrationCondSet = apis.NewLivingConditionSet(
+	IntegrationSourceConditionContainerSourceReady,
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*IntegrationSource) GetConditionSet() apis.ConditionSet {
+	return IntegrationCondSet
+}
+
+// GetTopLevelCondition returns the top level condition.
+func (s *IntegrationSourceStatus) GetTopLevelCondition() *apis.Condition {
+	return IntegrationCondSet.Manage(s).GetTopLevelCondition()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *IntegrationSourceStatus) InitializeConditions() {
+	IntegrationCondSet.Manage(s).InitializeConditions()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (iss *IntegrationSourceStatus) IsReady() bool {
+	return IntegrationCondSet.Manage(iss).IsHappy()
+}
+
+// PropagateContainerSourceStatus copies the ContainerSource's SourceStatus and maps its Ready condition onto ContainerSourceReady.
+func (s *IntegrationSourceStatus) PropagateContainerSourceStatus(status *v1.ContainerSourceStatus) {
+	// ContainerSource status has all we need, hence deep copy it.
+	s.SourceStatus = *status.SourceStatus.DeepCopy()
+
+	cond := status.GetCondition(apis.ConditionReady)
+	switch {
+	case cond == nil:
+		IntegrationCondSet.Manage(s).MarkUnknown(IntegrationSourceConditionContainerSourceReady, "", "")
+	case cond.Status == corev1.ConditionTrue:
+		IntegrationCondSet.Manage(s).MarkTrue(IntegrationSourceConditionContainerSourceReady)
+	case cond.Status == corev1.ConditionFalse:
+		IntegrationCondSet.Manage(s).MarkFalse(IntegrationSourceConditionContainerSourceReady, cond.Reason, cond.Message)
+	case cond.Status == corev1.ConditionUnknown:
+		IntegrationCondSet.Manage(s).MarkUnknown(IntegrationSourceConditionContainerSourceReady, cond.Reason, cond.Message)
+	default:
+		IntegrationCondSet.Manage(s).MarkUnknown(IntegrationSourceConditionContainerSourceReady, cond.Reason, cond.Message)
+	}
+
+	// Propagate the ContainerSource's AuthStatus to the IntegrationSource's AuthStatus
+	s.Auth = status.Auth
+}
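Illustrative only: a sketch of propagating a ContainerSource's Ready condition into the IntegrationSource status, using a hand-built ContainerSourceStatus in place of one produced by a real reconcile.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"knative.dev/pkg/apis"
	duckv1 "knative.dev/pkg/apis/duck/v1"

	sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
	sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1"
)

func main() {
	// A ContainerSource that already reports Ready=True.
	cs := &sourcesv1.ContainerSourceStatus{
		SourceStatus: duckv1.SourceStatus{
			Status: duckv1.Status{
				Conditions: duckv1.Conditions{{
					Type:   apis.ConditionReady,
					Status: corev1.ConditionTrue,
				}},
			},
		},
	}

	s := &sourcesv1alpha1.IntegrationSourceStatus{}
	s.InitializeConditions()
	s.PropagateContainerSourceStatus(cs)

	fmt.Println("ready:", s.IsReady()) // ContainerSourceReady is the only dependent condition
}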
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_types.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_types.go
new file mode 100644
index 00000000..2eec773b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_types.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"knative.dev/eventing/pkg/apis/common/integration/v1alpha1"
+	"knative.dev/pkg/apis"
+	duckv1 "knative.dev/pkg/apis/duck/v1"
+	"knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IntegrationSource is the Schema for the IntegrationSources API
+type IntegrationSource struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   IntegrationSourceSpec   `json:"spec,omitempty"`
+	Status IntegrationSourceStatus `json:"status,omitempty"`
+}
+
+var (
+	_ runtime.Object     = (*IntegrationSource)(nil)
+	_ kmeta.OwnerRefable = (*IntegrationSource)(nil)
+	_ apis.Validatable   = (*IntegrationSource)(nil)
+	_ apis.Defaultable   = (*IntegrationSource)(nil)
+	_ apis.HasSpec       = (*IntegrationSource)(nil)
+	_ duckv1.KRShaped    = (*IntegrationSource)(nil)
+	_ apis.Convertible   = (*IntegrationSource)(nil)
+)
+
+// IntegrationSourceSpec defines the desired state of IntegrationSource
+type IntegrationSourceSpec struct {
+	// inherits duck/v1 SourceSpec, which currently provides:
+	// * Sink - a reference to an object that will resolve to a domain name or
+	//   a URI directly to use as the sink.
+	// * CloudEventOverrides - defines overrides to control the output format
+	//   and modifications of the event sent to the sink.
+	duckv1.SourceSpec `json:",inline"`
+
+	Aws   *Aws   `json:"aws,omitempty"`   // AWS source configuration
+	Timer *Timer `json:"timer,omitempty"` // Timer configuration
+}
+
+type Timer struct {
+	Period      int    `json:"period" default:"1000"`            // Interval (in milliseconds) between producing messages
+	Message     string `json:"message"`                          // Message to generate
+	ContentType string `json:"contentType" default:"text/plain"` // Content type of generated message
+	RepeatCount int    `json:"repeatCount,omitempty"`            // Max number of fires (optional)
+}
+
+type Aws struct {
+	S3         *v1alpha1.AWSS3         `json:"s3,omitempty"`         // S3 source configuration
+	SQS        *v1alpha1.AWSSQS        `json:"sqs,omitempty"`        // SQS source configuration
+	DDBStreams *v1alpha1.AWSDDBStreams `json:"ddbStreams,omitempty"` // DynamoDB Streams source configuration
+	Auth       *v1alpha1.Auth          `json:"auth,omitempty"`
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (*IntegrationSource) GetGroupVersionKind() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("IntegrationSource")
+}
+
+// IntegrationSourceStatus defines the observed state of IntegrationSource
+type IntegrationSourceStatus struct {
+	// inherits duck/v1 SourceStatus, which currently provides:
+	// * ObservedGeneration - the 'Generation' of the Service that was last
+	//   processed by the controller.
+	// * Conditions - the latest available observations of a resource's current
+	//   state.
+	// * SinkURI - the current active sink URI that has been configured for the
+	//   Source.
+	duckv1.SourceStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IntegrationSourceList contains a list of IntegrationSource
+type IntegrationSourceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []IntegrationSource `json:"items"`
+}
+
+// GetUntypedSpec returns the spec of the IntegrationSource.
+func (c *IntegrationSource) GetUntypedSpec() interface{} {
+	return c.Spec
+}
+
+// GetStatus retrieves the status of the IntegrationSource. Implements the KRShaped interface.
+func (c *IntegrationSource) GetStatus() *duckv1.Status {
+	return &c.Status.Status
+}
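Illustrative only: a sketch of an IntegrationSource that produces timer events and delivers them to a Broker sink; the object name and the Broker reference are arbitrary example values.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	duckv1 "knative.dev/pkg/apis/duck/v1"

	sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1"
)

func main() {
	src := &sourcesv1alpha1.IntegrationSource{
		ObjectMeta: metav1.ObjectMeta{Name: "timer-source", Namespace: "default"},
		Spec: sourcesv1alpha1.IntegrationSourceSpec{
			SourceSpec: duckv1.SourceSpec{
				Sink: duckv1.Destination{
					Ref: &duckv1.KReference{
						APIVersion: "eventing.knative.dev/v1",
						Kind:       "Broker",
						Name:       "default",
					},
				},
			},
			Timer: &sourcesv1alpha1.Timer{
				Period:      1000,
				Message:     "hello from the timer",
				ContentType: "text/plain",
			},
		},
	}

	fmt.Println(src.GetGroupVersionKind(), src.GetUntypedSpec())
}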
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_validation.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_validation.go
new file mode 100644
index 00000000..935e18e3
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_validation.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+
+	"knative.dev/pkg/apis"
+)
+
+func (source *IntegrationSource) Validate(ctx context.Context) *apis.FieldError {
+	ctx = apis.WithinParent(ctx, source.ObjectMeta)
+	return source.Spec.Validate(ctx).ViaField("spec")
+}
+
+// Validate ensures exactly one source type is configured and that any AWS source carries the required auth and per-service fields.
+func (spec *IntegrationSourceSpec) Validate(ctx context.Context) *apis.FieldError {
+	var errs *apis.FieldError
+
+	// Count how many fields are set to ensure mutual exclusivity
+	sourceSetCount := 0
+	if spec.Timer != nil {
+		sourceSetCount++
+	}
+	if spec.Aws != nil {
+		if spec.Aws.S3 != nil {
+			sourceSetCount++
+		}
+		if spec.Aws.SQS != nil {
+			sourceSetCount++
+		}
+		if spec.Aws.DDBStreams != nil {
+			sourceSetCount++
+		}
+	}
+
+	// Validate that only one source field is set
+	if sourceSetCount > 1 {
+		errs = errs.Also(apis.ErrGeneric("only one source type can be set", "spec"))
+	} else if sourceSetCount == 0 {
+		errs = errs.Also(apis.ErrGeneric("at least one source type must be specified", "spec"))
+	}
+
+	// Only perform AWS-specific validation if exactly one AWS source is configured
+	if sourceSetCount == 1 && spec.Aws != nil {
+		if spec.Aws.S3 != nil || spec.Aws.SQS != nil || spec.Aws.DDBStreams != nil {
+			// Check that AWS Auth is properly configured
+			if !spec.Aws.Auth.HasAuth() {
+				errs = errs.Also(apis.ErrMissingField("aws.auth.secret.ref.name"))
+			}
+		}
+
+		// Additional validation for AWS S3 required fields
+		if spec.Aws.S3 != nil {
+			if spec.Aws.S3.Arn == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.s3.arn"))
+			}
+			if spec.Aws.S3.Region == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.s3.region"))
+			}
+		}
+
+		// Additional validation for AWS SQS required fields
+		if spec.Aws.SQS != nil {
+			if spec.Aws.SQS.Arn == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.sqs.arn"))
+			}
+			if spec.Aws.SQS.Region == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.sqs.region"))
+			}
+		}
+
+		// Additional validation for AWS DDBStreams required fields
+		if spec.Aws.DDBStreams != nil {
+			if spec.Aws.DDBStreams.Table == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.ddb-streams.table"))
+			}
+			if spec.Aws.DDBStreams.Region == "" {
+				errs = errs.Also(apis.ErrMissingField("aws.ddb-streams.region"))
+			}
+		}
+	}
+
+	return errs
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/register.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/register.go
new file mode 100644
index 00000000..a812c885
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"knative.dev/eventing/pkg/apis/sources"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: sources.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&IntegrationSource{},
+		&IntegrationSourceList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
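Illustrative only: a sketch of wiring the new scheme builder into a runtime.Scheme so clients can decode the IntegrationSource kinds registered above.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := sourcesv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme now recognizes the kinds added by addKnownTypes.
	gvk := sourcesv1alpha1.SchemeGroupVersion.WithKind("IntegrationSource")
	fmt.Println(scheme.Recognizes(gvk)) // true
}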
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..7e2857fd
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,184 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	integrationv1alpha1 "knative.dev/eventing/pkg/apis/common/integration/v1alpha1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Aws) DeepCopyInto(out *Aws) {
+	*out = *in
+	if in.S3 != nil {
+		in, out := &in.S3, &out.S3
+		*out = new(integrationv1alpha1.AWSS3)
+		**out = **in
+	}
+	if in.SQS != nil {
+		in, out := &in.SQS, &out.SQS
+		*out = new(integrationv1alpha1.AWSSQS)
+		**out = **in
+	}
+	if in.DDBStreams != nil {
+		in, out := &in.DDBStreams, &out.DDBStreams
+		*out = new(integrationv1alpha1.AWSDDBStreams)
+		**out = **in
+	}
+	if in.Auth != nil {
+		in, out := &in.Auth, &out.Auth
+		*out = new(integrationv1alpha1.Auth)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Aws.
+func (in *Aws) DeepCopy() *Aws {
+	if in == nil {
+		return nil
+	}
+	out := new(Aws)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntegrationSource) DeepCopyInto(out *IntegrationSource) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSource.
+func (in *IntegrationSource) DeepCopy() *IntegrationSource {
+	if in == nil {
+		return nil
+	}
+	out := new(IntegrationSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IntegrationSource) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntegrationSourceList) DeepCopyInto(out *IntegrationSourceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]IntegrationSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSourceList.
+func (in *IntegrationSourceList) DeepCopy() *IntegrationSourceList {
+	if in == nil {
+		return nil
+	}
+	out := new(IntegrationSourceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IntegrationSourceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntegrationSourceSpec) DeepCopyInto(out *IntegrationSourceSpec) {
+	*out = *in
+	in.SourceSpec.DeepCopyInto(&out.SourceSpec)
+	if in.Aws != nil {
+		in, out := &in.Aws, &out.Aws
+		*out = new(Aws)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Timer != nil {
+		in, out := &in.Timer, &out.Timer
+		*out = new(Timer)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSourceSpec.
+func (in *IntegrationSourceSpec) DeepCopy() *IntegrationSourceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IntegrationSourceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntegrationSourceStatus) DeepCopyInto(out *IntegrationSourceStatus) {
+	*out = *in
+	in.SourceStatus.DeepCopyInto(&out.SourceStatus)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSourceStatus.
+func (in *IntegrationSourceStatus) DeepCopy() *IntegrationSourceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IntegrationSourceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Timer) DeepCopyInto(out *Timer) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timer.
+func (in *Timer) DeepCopy() *Timer {
+	if in == nil {
+		return nil
+	}
+	out := new(Timer)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go
index 5d2955e0..786a698e 100644
--- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go
@@ -33,6 +33,7 @@ import (
 	messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
 	sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
 	sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+	sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1"
 	sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
 )
 
@@ -49,6 +50,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 	messagingv1.AddToScheme,
 	sinksv1alpha1.AddToScheme,
 	sourcesv1.AddToScheme,
+	sourcesv1alpha1.AddToScheme,
 	sourcesv1beta2.AddToScheme,
 }
 
diff --git a/vendor/knative.dev/eventing/pkg/eventingtls/eventingtls.go b/vendor/knative.dev/eventing/pkg/eventingtls/eventingtls.go
index 718a744c..eedea290 100644
--- a/vendor/knative.dev/eventing/pkg/eventingtls/eventingtls.go
+++ b/vendor/knative.dev/eventing/pkg/eventingtls/eventingtls.go
@@ -57,6 +57,9 @@ const (
 	BrokerFilterServerTLSSecretName = "mt-broker-filter-server-tls" //nolint:gosec // This is not a hardcoded credential
 	// BrokerIngressServerTLSSecretName is the name of the tls secret for the broker ingress server
 	BrokerIngressServerTLSSecretName = "mt-broker-ingress-server-tls" //nolint:gosec // This is not a hardcoded credential
+
+	// IntegrationSinkDispatcherServerTLSSecretName is the name of the tls secret for the integration sink dispatcher server
+	IntegrationSinkDispatcherServerTLSSecretName = "integration-sink-server-tls" //nolint:gosec // This is not a hardcoded credential
 )
 
 type ClientConfig struct {
diff --git a/vendor/knative.dev/hack/release.sh b/vendor/knative.dev/hack/release.sh
index 450c0671..a378a5f8 100644
--- a/vendor/knative.dev/hack/release.sh
+++ b/vendor/knative.dev/hack/release.sh
@@ -75,7 +75,7 @@ RELEASE_NOTES=""
 RELEASE_BRANCH=""
 RELEASE_GCS_BUCKET="knative-nightly/${REPO_NAME}"
 RELEASE_DIR=""
-KO_FLAGS="-P --platform=all"
+export KO_FLAGS="-P --platform=all"
 VALIDATION_TESTS="./test/presubmit-tests.sh"
 ARTIFACTS_TO_PUBLISH=""
 FROM_NIGHTLY_RELEASE=""
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 94a33f00..300730b6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -418,12 +418,12 @@ go.uber.org/zap/internal/stacktrace
 go.uber.org/zap/internal/ztest
 go.uber.org/zap/zapcore
 go.uber.org/zap/zaptest
-# golang.org/x/mod v0.21.0
+# golang.org/x/mod v0.22.0
 ## explicit; go 1.22.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.30.0
+# golang.org/x/net v0.31.0
 ## explicit; go 1.18
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
@@ -432,23 +432,23 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.22.0
+# golang.org/x/oauth2 v0.23.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.8.0
+# golang.org/x/sync v0.9.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.26.0
+# golang.org/x/sys v0.27.0
 ## explicit; go 1.18
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.25.0
+# golang.org/x/term v0.26.0
 ## explicit; go 1.18
 golang.org/x/term
-# golang.org/x/text v0.19.0
+# golang.org/x/text v0.20.0
 ## explicit; go 1.18
 golang.org/x/text/encoding
 golang.org/x/text/encoding/internal
@@ -463,7 +463,7 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.6.0
 ## explicit; go 1.18
 golang.org/x/time/rate
-# golang.org/x/tools v0.26.0
+# golang.org/x/tools v0.27.0
 ## explicit; go 1.22.0
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/gcexportdata
@@ -492,14 +492,14 @@ gomodules.xyz/jsonpatch/v2
 # google.golang.org/api v0.183.0
 ## explicit; go 1.20
 google.golang.org/api/support/bundler
-# google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142
+# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.67.1
-## explicit; go 1.21
+# google.golang.org/grpc v1.68.0
+## explicit; go 1.22.7
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -507,6 +507,8 @@ google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/base
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/pickfirst
+google.golang.org/grpc/balancer/pickfirst/internal
+google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/channelz
@@ -556,7 +558,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.35.1
+# google.golang.org/protobuf v1.35.2
 ## explicit; go 1.21
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
@@ -976,8 +978,9 @@ k8s.io/utils/pointer
 k8s.io/utils/ptr
 k8s.io/utils/strings/slices
 k8s.io/utils/trace
-# knative.dev/eventing v0.43.0
-## explicit; go 1.22.0
+# knative.dev/eventing v0.43.1-0.20241213142054-414af5ce0fae
+## explicit; go 1.22.7
+knative.dev/eventing/pkg/apis/common/integration/v1alpha1
 knative.dev/eventing/pkg/apis/config
 knative.dev/eventing/pkg/apis/duck
 knative.dev/eventing/pkg/apis/duck/v1
@@ -998,6 +1001,7 @@ knative.dev/eventing/pkg/apis/sinks/v1alpha1
 knative.dev/eventing/pkg/apis/sources
 knative.dev/eventing/pkg/apis/sources/config
 knative.dev/eventing/pkg/apis/sources/v1
+knative.dev/eventing/pkg/apis/sources/v1alpha1
 knative.dev/eventing/pkg/apis/sources/v1beta2
 knative.dev/eventing/pkg/client/clientset/versioned/scheme
 knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1
@@ -1011,11 +1015,11 @@ knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake
 knative.dev/eventing/pkg/crossnamespace
 knative.dev/eventing/pkg/eventingtls
-# knative.dev/hack v0.0.0-20241010131451-05b2fb30cb4d
+# knative.dev/hack v0.0.0-20241128013751-1978b3a02667
 ## explicit; go 1.21
 knative.dev/hack
-# knative.dev/networking v0.0.0-20241022012959-60e29ff520dc
-## explicit; go 1.22.0
+# knative.dev/networking v0.0.0-20241118075147-929a5d5f19d0
+## explicit; go 1.22.7
 knative.dev/networking/pkg
 knative.dev/networking/pkg/apis/networking
 knative.dev/networking/pkg/apis/networking/v1alpha1
@@ -1027,8 +1031,8 @@ knative.dev/networking/pkg/http/proxy
 knative.dev/networking/pkg/http/stats
 knative.dev/networking/pkg/ingress
 knative.dev/networking/pkg/k8s
-# knative.dev/pkg v0.0.0-20241021183759-9b9d535af5ad
-## explicit; go 1.22.0
+# knative.dev/pkg v0.0.0-20241128013618-f3ab5605e542
+## explicit; go 1.22.7
 knative.dev/pkg/apis
 knative.dev/pkg/apis/duck
 knative.dev/pkg/apis/duck/ducktypes
@@ -1075,8 +1079,8 @@ knative.dev/pkg/tracing/config
 knative.dev/pkg/tracing/propagation
 knative.dev/pkg/tracing/propagation/tracecontextb3
 knative.dev/pkg/tracker
-# knative.dev/serving v0.43.0
-## explicit; go 1.22.0
+# knative.dev/serving v0.43.1-0.20241209084051-2d5a1e99798a
+## explicit; go 1.22.7
 knative.dev/serving/pkg/apis/autoscaling
 knative.dev/serving/pkg/apis/autoscaling/v1alpha1
 knative.dev/serving/pkg/apis/config