From 7f87738c7081cbc32e6b9e321b538d57e7b3fab2 Mon Sep 17 00:00:00 2001
From: kpango
Date: Wed, 14 Aug 2024 21:26:59 +0900
Subject: [PATCH] Add UpdateTimestamp API

Signed-off-by: kpango
---
 .cspell.json | 4090 ++++++++++++++++-
 .gitfiles | 22 +-
 .github/helm/values/values-correction.yaml | 2 +-
 .../workflows/dockers-benchmark-job-image.yml | 4 +-
 .../dockers-benchmark-operator-image.yaml | 4 +-
 Makefile | 13 +-
 Makefile.d/client.mk | 45 -
 Makefile.d/dependencies.mk | 6 -
 Makefile.d/tools.mk | 29 +-
 README.md | 2 +
 apis/docs/v1/docs.md | 11 +-
 apis/grpc/v1/vald/update.pb.go | 41 +-
 apis/grpc/v1/vald/update_vtproto.pb.go | 45 +
 apis/grpc/v1/vald/vald.go | 1 +
 apis/proto/v1/vald/update.proto | 8 +
 apis/swagger/v1/vald/update.swagger.json | 23 +-
 charts/vald-benchmark-operator/README.md | 2 +-
 .../schemas/job-values.yaml | 2 +-
 .../templates/deployment.yaml | 2 +-
 charts/vald-benchmark-operator/values.yaml | 2 +-
 charts/vald/values.yaml | 2 +-
 dockers/agent/core/agent/Dockerfile | 6 +-
 dockers/agent/core/faiss/Dockerfile | 2 +-
 dockers/agent/core/ngt/Dockerfile | 2 +-
 dockers/agent/sidecar/Dockerfile | 2 +-
 dockers/binfmt/Dockerfile | 2 +-
 dockers/buildbase/Dockerfile | 2 +-
 dockers/buildkit/Dockerfile | 2 +-
 dockers/ci/base/Dockerfile | 5 +-
 dockers/dev/Dockerfile | 7 +-
 dockers/discoverer/k8s/Dockerfile | 2 +-
 dockers/gateway/filter/Dockerfile | 2 +-
 dockers/gateway/lb/Dockerfile | 2 +-
 dockers/gateway/mirror/Dockerfile | 2 +-
 dockers/index/job/correction/Dockerfile | 2 +-
 dockers/index/job/creation/Dockerfile | 2 +-
 .../index/job/readreplica/rotate/Dockerfile | 2 +-
 dockers/index/job/save/Dockerfile | 2 +-
 dockers/index/operator/Dockerfile | 2 +-
 dockers/manager/index/Dockerfile | 2 +-
 dockers/operator/helm/Dockerfile | 2 +-
 dockers/tools/benchmark/job/Dockerfile | 2 +-
 dockers/tools/benchmark/operator/Dockerfile | 2 +-
 dockers/tools/cli/loadtest/Dockerfile | 2 +-
 docs/contributing/unit-test-guideline.md | 2 +-
 .../observability-configuration.md | 2 +-
 example/client/go.mod | 6 +-
 example/client/go.sum | 8 +-
 go.mod | 42 +-
 go.sum | 53 +-
 hack/cspell/main.go | 277 ++
 hack/docker/gen/main.go | 27 +-
 internal/backoff/backoff_test.go | 18 +-
 internal/cache/gache/option_test.go | 18 +-
 internal/cache/option.go | 4 +-
 internal/circuitbreaker/breaker.go | 4 +-
 internal/circuitbreaker/breaker_test.go | 54 +-
 internal/circuitbreaker/options.go | 2 +-
 internal/client/v1/client/vald/vald.go | 34 +
 internal/compress/gob_test.go | 6 +-
 internal/compress/lz4_test.go | 8 +-
 internal/config/cassandra_test.go | 2 +-
 internal/config/log.go | 2 +-
 internal/core/algorithm/ngt/ngt_test.go | 16 +-
 internal/db/rdb/mysql/dbr/dbr.go | 2 +-
 internal/db/rdb/mysql/dbr/insert.go | 4 +-
 internal/db/rdb/mysql/dbr/session.go | 4 +-
 internal/db/rdb/mysql/dbr/tx.go | 2 +-
 internal/db/rdb/mysql/mysql_test.go | 22 +-
 internal/db/rdb/mysql/option.go | 4 +-
 .../db/storage/blob/cloudstorage/option.go | 2 +-
 internal/db/storage/blob/s3/reader/option.go | 2 +-
 internal/db/storage/blob/s3/s3_test.go | 2 +-
 .../storage/blob/s3/session/session_test.go | 4 +-
 internal/errors/agent.go | 5 +
 internal/errors/corrector.go | 2 +-
 internal/errors/grpc.go | 2 +-
 internal/errors/net.go | 2 +-
 internal/errors/option_test.go | 10 +-
 internal/errors/redis.go | 2 +-
 internal/errors/redis_test.go | 36 +-
 internal/errors/tls.go | 4 +-
 internal/errors/vald.go | 2 +-
 internal/info/info.go | 2 +-
 internal/log/option_test.go | 6 +-
 internal/net/dialer_test.go | 2 +-
 .../grpc/interceptor/client/metric/metric.go | 4 +-
 .../grpc/interceptor/server/metric/metric.go | 4 +-
 internal/net/http/json/json_test.go | 14 +-
 internal/tls/tls.go | 2 +-
 internal/worker/queue.go | 6 +-
 internal/worker/queue_option.go | 2 +-
 pkg/agent/core/faiss/service/faiss.go | 2 +-
 .../core/ngt/handler/grpc/object_test.go | 2 +-
 pkg/agent/core/ngt/service/ngt.go | 8 +-
 pkg/agent/core/ngt/service/ngt_test.go | 26 +-
 pkg/agent/internal/kvs/kvs_test.go | 16 +-
 pkg/gateway/lb/handler/grpc/handler.go | 441 +-
 pkg/gateway/lb/service/gateway.go | 7 +-
 pkg/gateway/mirror/handler/grpc/handler.go | 10 +-
 pkg/gateway/mirror/service/mirror.go | 26 +-
 pkg/gateway/mirror/service/mirror_option.go | 4 +-
 pkg/gateway/mirror/service/mirror_test.go | 144 +-
 pkg/gateway/mirror/usecase/vald.go | 8 +-
 pkg/index/job/correction/service/corrector.go | 818 ++--
 pkg/index/job/correction/usecase/corrector.go | 4 +-
 .../job/readreplica/rotate/service/rotator.go | 4 +-
 pkg/index/operator/service/operator.go | 6 +-
 pkg/manager/index/service/indexer.go | 4 +-
 pkg/tools/benchmark/job/config/config.go | 2 +-
 .../benchmark/operator/service/operator.go | 4 +-
 .../operator/service/operator_test.go | 2 +-
 rust/Cargo.lock | 61 +-
 rust/bin/agent/src/handler/update.rs | 8 +
 rust/libs/proto/src/vald.v1.tonic.rs | 87 +
 tests/e2e/crud/crud_test.go | 4 +-
 .../agent/core/ngt/service/ngt_e2s_test.go | 4 +-
 versions/PROMETHEUS_STACK_VERSION | 2 +-
 versions/VALDCLI_VERSION | 1 -
 119 files changed, 5613 insertions(+), 1246 deletions(-)
 delete mode 100644 Makefile.d/client.mk
 create mode 100644 hack/cspell/main.go
 delete mode 100644 versions/VALDCLI_VERSION

diff --git a/.cspell.json b/.cspell.json
index 71ced17ccf0..1e718380d34 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -1,226 +1,4070 @@
 {
-  "version": "0.2",
-  "language": "en",
-  "words": [
-    "ACCESSS",
+  "import": [
+    "@cspell/dict-cpp/cspell-ext.json",
+    "@cspell/dict-docker/cspell-ext.json",
+    "@cspell/dict-en_us/cspell-ext.json",
+    "@cspell/dict-fullstack/cspell-ext.json",
+    "@cspell/dict-git/cspell-ext.json",
+    "@cspell/dict-golang/cspell-ext.json",
+    "@cspell/dict-k8s/cspell-ext.json",
+    "@cspell/dict-makefile/cspell-ext.json",
+    "@cspell/dict-markdown/cspell-ext.json",
+    "@cspell/dict-npm/cspell-ext.json",
+    "@cspell/dict-public-licenses/cspell-ext.json",
+    "@cspell/dict-rust/cspell-ext.json",
+    "@cspell/dict-shell/cspell-ext.json"
+  ],
+  "ignorePaths": [
+    "**/*.ai",
+    "**/*.drawio",
+    "**/*.hdf5",
+    "**/*.key",
+    "**/*.lock",
+    "**/*.log",
+    "**/*.md5",
+    "**/*.pack",
+    "**/*.pdf",
+    "**/*.pem",
+    "**/*.png",
+    "**/*.sum",
+    "**/*.svg",
+    "**/.git/objects/**",
+    "**/cmd/agent/core/faiss/faiss",
+    "**/cmd/agent/core/ngt/ngt",
+    "**/cmd/agent/sidecar/sidecar",
+    "**/cmd/discoverer/k8s/discoverer",
+    "**/cmd/gateway/filter/filter",
+    "**/cmd/gateway/lb/lb",
+    "**/cmd/gateway/mirror/mirror",
+    "**/cmd/index/job/correction/index-correction",
+    "**/cmd/index/job/creation/index-creation",
+    "**/cmd/index/job/readreplica/rotate/readreplica-rotate",
+    "**/cmd/index/job/save/index-save",
+    "**/cmd/index/operator/index-operator",
+    "**/cmd/manager/index/index",
+    "**/cmd/tools/benchmark/job/job",
+    "**/cmd/tools/benchmark/operator/operator",
+    "**/cmd/tools/cli/loadtest/loadtest",
+    "**/internal/core/algorithm/ngt/assets/index",
+    "**/internal/test/data/agent/ngt/validIndex"
+  ],
+  "ignoreWords": [
     "AQUASECURITY",
-    "AUTOBUILD",
+    "Addrs",
+    "Atof",
+    "Atol",
+    "Autoscaler",
+    "BINFMT",
+    "BUILDBASE",
+    "BUILDKIT",
     "BUILDX",
-    "Burstable",
+    "Bbolt",
+    "Buildx",
+    "CAPI",
+    "CHATOPS",
     "Capi",
+    "DISTROLESS",
+    "DNSA",
+
"Debugd", + "Debugf", + "Devcontainer", + "EUCJP", + "Errord", "Errorf", + "Eucjp", "FAISS", + "FASTOPEN", + "Faiss", + "Fatald", + "Fnum", + "GACHE", + "GETOBJECT", "GHACTION", + "GOBIN", "GOLANGCILINT", - "GOMAXPROCS", - "Godoc", + "GOLINES", + "GOPATH", + "GOPKG", + "GOPRIVATE", + "GOROOT", + "GOTEST", + "GOTESTS", + "Gache", + "Gocqlx", + "Goleak", + "IDRPC", + "INITCONTAINER", + "Idxs", + "Iface", + "Indegree", + "Infod", "Infof", + "Inuse", + "Jaccard", + "KEEPIDLE", + "KEYSPACE", + "KLOG", + "KUBECONFIG", "KUBELINTER", + "KVSDB", + "Keyspace", + "Kvsdb", "LANGUAGETOOL", - "Milli", + "LDFLAGS", + "LOADTEST", + "LOGRUS", + "LOGRUs", + "MNIST", + "Mallocs", + "Mirr", + "Nbits", + "Nocie", "ONNX", - "OTEL", - "PROTOBUF", + "Oneof", + "Outdegree", + "Outf", + "PORTFORWARD", + "Portforward", + "Prost", + "Ptop", + "Pyroscope", + "QUICKACK", + "RDONLY", + "READREPLICA", "REVIEWDOG", - "Roundtripper", + "ROOTDIR", + "RUSTUP", + "Readreplica", + "Rebalance", + "Regist", + "Represets", + "Retryable", + "Reviewdog", + "Rootdir", + "Ruleguard", "SARIF", - "SOFTPROPS", - "Structs", + "SYFT", + "Sjis", + "Stmts", + "Struct", + "Svcs", "TELEPRESENCE", + "TEXTLINT", + "TMPDIR", + "Tgts", + "Tolerations", + "UPSERT", + "Unmarshal", + "Upsert", "VALD", - "VALDCLI", + "VALDRELEASE", + "VECTORIZER", "VHOR", "Vald", + "Vald's", + "Vals", + "Vecs", "Vectorizer", - "accesslog", - "achive", + "Vqueue", + "Warnd", + "Warnf", + "Wrapf", + "ZEROLOG", + "ZSTD", + "Zstd", "addrs", + "akrylysov", + "anypb", + "apiextensions", + "apimachinery", "apiserver", - "attirbute", + "aquasecurity", + "autoclean", + "automaxprocs", + "autoremove", + "backoffmetrics", "bbolt", - "boudary", - "brandguidelines", + "bigann", + "binfmt", + "bufbuild", "buildbase", + "buildkit", + "buildx", + "bvecs", + "bzrignore", + "canceld", + "cbmetrics", "chatops", - "chrono", "circuitbreaker", + "clientip", "cloudstorage", "clusterrole", - "clusterrolebinding", + "cockroachdb", + "codegen", "configmap", - "contributorsrc", - "conv", + "corev", "crds", - "crlfmt", + "cstring", "ctxio", "daemonset", - "deepsource", + "dataspace", + "decbytes", "devcontainer", + "devcontainers", "devel", + "dicoverer", + "distroless", + "dotproduct", + "dpkg", + "elif", + "emap", "envoyproxy", + "eoptions", "errdetails", - "errgroup", - "facebookresearch", + "esac", + "eucjp", + "extendee", + "faild", "faiss", "fastime", + "flot", + "fmap", + "fnum", + "fsnotify", "fuid", + "fvecs", "gache", + "gcsblob", + "genproto", "getobject", - "getstarted", - "gitfiles", + "gfortran", + "ghaction", + "glog", + "gobc", + "goccy", + "gocloud", + "gocql", + "gocqlx", + "gocraft", "gofumpt", "goimports", "golangci", "goleak", "golines", - "gongt", + "gomnd", "gonum", + "gopkg", + "gopls", + "gopter", "gorules", "gotest", + "gotestfmt", "gotests", "gotmpl", - "hadolint", - "helmignore", - "httputil", - "icfg", + "gpgsign", + "healthz", + "hgignore", + "hlts", + "iconfig", + "idelay", + "iface", + "indegree", + "infometrics", "initcontainer", "innerproduct", - "ioutil", + "inuse", + "ivecs", "jaccard", - "japansearch", - "kosarak", + "jitted", + "keepalive", + "keyspace", + "klauspost", + "klog", + "kmrmt", "kpango", + "kprofefe", + "kubeconfig", "kubelinter", - "kustomization", "kvsdb", + "languagetool", + "ldconfig", + "leanovate", + "libaec", "libhdf", - "loadbalancer", + "liblapack", + "libomp", + "libopenblas", + "livenesss", "loadtest", - "lycorp", - "maxprocs", - "minio", + "localtime", + "logrus", + "mallocs", + "metav", + "mirr", + "mktemp", + "mktmp", "mnist", + 
"moby", + "mthe", "multiapis", - "multicluster", - "nanos", "nbits", - "networkpolicy", + "nindent", "nlist", + "nocie", + "nogce", + "nolint", + "noninteractive", + "nonroot", "normalizedangle", "normalizedcosine", + "normalizedl", + "normang", + "normcos", + "norml", "nosql", - "nytimes", + "objs", "oneof", "onnx", - "otel", - "otlp", + "opencensus", + "otelgrpc", + "otlptrace", + "outdegree", + "pipefail", + "planetscale", "pogreb", + "poinc", + "pointradius", "portforward", "pprof", - "priorityclass", + "preriodically", "profefe", - "promtail", "prost", - "protobuf", "protoc", - "protos", + "protocolbuffers", + "protoimpl", + "protoreflect", + "protovalidate", + "pstartf", + "pstopf", "pyroscope", + "quasilyte", "readreplica", "rebalance", - "rebalancing", "replicasets", - "rerank", - "retrive", + "resered", + "retryable", "reviewdog", - "rustc", + "rgba", + "rinx", + "roccd", + "rolebindings", + "rpcs", + "ruleguard", "rustup", - "serviceaccount", - "singleflight", + "sarif", + "scylladb", + "sess", + "signingkey", + "signoff", + "sjis", + "skipcq", + "snapshotter", + "sparsejaccard", + "spjac", + "srvs", + "sspan", "statefulset", - "steamsearch", - "streaminsert", - "structs", - "subtests", - "testdata", - "textlintrc", + "stmts", + "stos", + "stretchr", + "struct", + "substr", + "svcs", + "syft", + "tagalign", + "telepresence", + "textlint", + "tgts", + "timepicker", "timeutil", + "tmpdir", + "tmpfs", + "tmproj", + "tolerations", + "tparse", "traefik", + "trunc", + "tzdata", + "ultiple", + "unixgram", + "unixpacket", + "unmarshal", "upsert", "upserted", "urlopener", "usecase", "vald", + "vald's", "valdbenchmarkjob", + "valdbenchmarkjobs", "valdbenchmarkoperatorrelease", "valdbenchmarkscenario", + "valdbenchmarkscenarios", "valdhelmoperatorrelease", - "valdhelmopratorreleases", - "valdmirrortarget", - "valdmirrortargets", + "valdhelmoperatorreleases", "valdrelease", - "vbor", + "valdreleases", + "vals", + "vankichi", "vdaas", - "vdctl", + "vecs", "vectorizer", - "vectorizing", "vhor", + "vmhwm", + "vmpeak", + "vmrss", + "volumesnapshots", "vqueue", "vtproto", - "werr", - "whitesource", + "wfci", + "xaxis", "yahoojapan", "yamlfmt", + "yaxes", + "yaxis", + "zeebo", + "zerolog", + "zoneinfo", "zstd" ], - "ignoreWords": [ - "CMYK", - "SHOGO", - "TECHNOTE", - "agentd", - "benchmarkd", - "conflint", - "sidecard" - ], - "dictionaries": [ - "softwareTerms", - "misc", - "companies", - "typescript", - "node", - "html", - "css", - "fonts", - "filetypes", - "npm" + "ignoreRegExpList": [ + ".*addr$", + ".*buf$", + ".*cfg$", + ".*ch$", + ".*conf$", + ".*ctx$", + ".*dur$", + ".*err$", + ".*group$", + ".*len$", + ".*opts$" ], - "ignorePaths": [ - "**/*.ai", - "**/*.drawio", - "**/*.hdf5", - "**/*.key", - "**/*.lock", - "**/*.log", - "**/*.md5", - "**/*.pack", - "**/*.pdf", - "**/*.pem", - "**/*.png", - "**/*.sum", - "**/*.svg", - "**/.git/objects/**", - "**/cmd/agent/core/faiss/faiss", - "**/cmd/agent/core/ngt/ngt", - "**/cmd/agent/sidecar/sidecar", - "**/cmd/discoverer/k8s/k8s", - "**/cmd/gateway/filter/filter", - "**/cmd/gateway/lb/lb", - "**/cmd/gateway/mirror/mirror", - "**/cmd/index/job/correction/correction", - "**/cmd/index/job/creation/creation", - "**/cmd/index/job/readreplica/rotate/rotate", - "**/cmd/index/job/save/save", - "**/cmd/manager/index/index", - "**/internal/core/algorithm/ngt/assets/index", - "**/internal/test/data/agent/ngt/validIndex" + "overrides": [ + { + "filename": ".all-contributorsrc", + "ignoreWords": [ + "Funakoshi", + "Grimaud", + "Hiroto", + "Hrichik", + "Kadowaki", 
+ "Kato", + "Katz", + "Kiichiro", + "Koichi", + "Kosuke", + "Mazumder", + "Morimoto", + "Okamura", + "Rintaro", + "Shiraishi", + "Siyuan", + "YUKAWA", + "Yusuke", + "aknishid", + "ando", + "datelier", + "dotdc", + "hrichiksite", + "junsei", + "kevindiu", + "liusy", + "pgrimaud", + "taisuou", + "takuyaymd", + "thedrow", + "ykadowak", + "zchee" + ] + }, + { + "filename": ".commit_template", + "ignoreWords": [ + "bento", + "tada" + ] + }, + { + "filename": ".devcontainer/devcontainer.json", + "ignoreWords": [ + "PTRACE", + "commandhistory", + "seccomp", + "zshhistory" + ] + }, + { + "filename": ".devcontainer/postAttachCommand.sh", + "ignoreWords": [ + "commandhistory" + ] + }, + { + "filename": ".fossa.yml", + "ignoreWords": [ + "vdctl" + ] + }, + { + "filename": ".git/FETCH_HEAD", + "ignoreWords": [ + "binaryindex", + "enought", + "guildeline", + "rebalancer", + "valuesgo" + ] + }, + { + "filename": ".git/hooks/applypatch-msg.sample", + "ignoreWords": [ + "commitmsg" + ] + }, + { + "filename": ".git/hooks/fsmonitor-watchman.sample", + "ignoreWords": [ + "CHLD", + "binmode", + "clockid", + "msys" + ] + }, + { + "filename": ".git/hooks/pre-commit.sample", + "ignoreWords": [ + "allownonascii" + ] + }, + { + "filename": ".git/hooks/pre-rebase.sample", + "ignoreWords": [ + "Hamano", + "Junio", + "oneline" + ] + }, + { + "filename": ".git/hooks/pre-receive.sample", + "ignoreWords": [ + "echoback" + ] + }, + { + "filename": ".git/hooks/push-to-checkout.sample", + "ignoreWords": [ + "behaviour" + ] + }, + { + "filename": ".git/hooks/sendemail-validate.sample", + "ignoreWords": [ + "SENDEMAIL", + "Worktree", + "sendemail", + "worktree" + ] + }, + { + "filename": ".git/hooks/update.sample", + "ignoreWords": [ + "allowdeletetag", + "allowmodifytag", + "allowunannotated", + "newrev", + "oldrev", + "projectdesc" + ] + }, + { + "filename": ".git/packed-refs", + "ignoreWords": [ + "binaryindex", + "guildeline", + "rebalancer" + ] + }, + { + "filename": ".gitattributes", + "ignoreWords": [ + "contributorsrc" + ] + }, + { + "filename": ".gitfiles", + "ignoreWords": [ + "AUTOBUILD", + "Autobuild", + "CMYK", + "CONFLINT", + "DEEPSOURCE", + "Deepsource", + "HADOLINT", + "SHOGO", + "SOFTPROPS", + "Singleflight", + "TECHNOTE", + "accesslog", + "agentd", + "autobuild", + "benchmarkd", + "brandguidelines", + "clusterrolebinding", + "conflint", + "contributorsrc", + "conv", + "darkgray", + "deepsource", + "getstarted", + "gitfiles", + "gongt", + "hadolint", + "helmignore", + "kosarak", + "kustomization", + "lycorp", + "multicluster", + "networkpolicy", + "nytimes", + "priorityclass", + "promtail", + "serviceaccount", + "shogo", + "sidecard", + "singleflight", + "softprops", + "technote", + "testdata", + "textlintrc", + "tmpl", + "valdmirrortarget", + "vdctl", + "whitesource" + ] + }, + { + "filename": ".github/actions/docker-build/action.yaml", + "ignoreWords": [ + "opencontainers" + ] + }, + { + "filename": ".github/actions/e2e-deploy-vald-helm-operator/action.yaml", + "ignoreWords": [ + "PODNAME", + "podname" + ] + }, + { + "filename": ".github/actions/e2e-deploy-vald-readreplica/action.yaml", + "ignoreWords": [ + "PODNAME", + "podname" + ] + }, + { + "filename": ".github/actions/e2e-deploy-vald/action.yaml", + "ignoreWords": [ + "PODNAME", + "podname" + ] + }, + { + "filename": ".github/actions/notify-slack/action.yaml", + "ignoreWords": [ + "TECHNOTE", + "technote" + ] + }, + { + "filename": ".github/chatops_commands.md", + "ignoreWords": [ + "bento" + ] + }, + { + "filename": 
".github/chatops_permissions.yaml", + "ignoreWords": [ + "datelier", + "kevindiu" + ] + }, + { + "filename": ".github/conflint.yaml", + "ignoreWords": [ + "kubeval" + ] + }, + { + "filename": ".github/dependabot.yml", + "ignoreWords": [ + "gomod" + ] + }, + { + "filename": ".github/helm/values/values-agent-sidecar.yaml", + "ignoreWords": [ + "ACCESSKEY", + "SECRETKEY" + ] + }, + { + "filename": ".github/helm/values/values-chaos.yaml", + "ignoreWords": [ + "serversscheme" + ] + }, + { + "filename": ".github/helm/values/values-profile.yaml", + "ignoreWords": [ + "GOMAXPROCS" + ] + }, + { + "filename": ".github/helm/values/values-readreplica.yaml", + "ignoreWords": [ + "snapclass" + ] + }, + { + "filename": ".github/workflows/_docker-image-scan.yaml", + "ignoreWords": [ + "imagename", + "opencontainers" + ] + }, + { + "filename": ".github/workflows/_docker-image.yaml", + "ignoreWords": [ + "DOCKERHUB", + "buildkitd", + "stargz" + ] + }, + { + "filename": ".github/workflows/backport.yml", + "ignoreWords": [ + "startswith" + ] + }, + { + "filename": ".github/workflows/build-binaries.yml", + "ignoreWords": [ + "SHOGO", + "shogo" + ] + }, + { + "filename": ".github/workflows/chatops.yml", + "ignoreWords": [ + "gentest" + ] + }, + { + "filename": ".github/workflows/codeql-analysis.yml", + "ignoreWords": [ + "AUTOBUILD", + "Autobuild", + "autobuild" + ] + }, + { + "filename": ".github/workflows/coverage.yml", + "ignoreWords": [ + "DEEPSOURCE", + "Deepsource", + "deepsource" + ] + }, + { + "filename": ".github/workflows/detect-internal-config-changes.yml", + "ignoreWords": [ + "interal" + ] + }, + { + "filename": ".github/workflows/dockers-gateway-mirror-image.yaml", + "ignoreWords": [ + "nirror" + ] + }, + { + "filename": ".github/workflows/e2e-chaos.yaml", + "ignoreWords": [ + "clusterwide" + ] + }, + { + "filename": ".github/workflows/e2e-max-dim.yml", + "ignoreWords": [ + "readlink" + ] + }, + { + "filename": ".github/workflows/e2e-profiling.yml", + "ignoreWords": [ + "threadcreate" + ] + }, + { + "filename": ".github/workflows/fossa.yml", + "ignoreWords": [ + "urllib" + ] + }, + { + "filename": ".github/workflows/labeler.yml", + "ignoreWords": [ + "shortstat" + ] + }, + { + "filename": ".github/workflows/release.yml", + "ignoreWords": [ + "GOPROXY", + "SOFTPROPS", + "goproxy", + "softprops" + ] + }, + { + "filename": ".github/workflows/reviewdog-hadolint.yml", + "ignoreWords": [ + "Dockerfiles", + "HADOLINT", + "dockerfiles", + "hadolint" + ] + }, + { + "filename": ".github/workflows/reviewdog-k8s.yml", + "ignoreWords": [ + "CONFLINT", + "conflint", + "kubeval" + ] + }, + { + "filename": ".github/workflows/reviewdog-markdown.yml", + "ignoreWords": [ + "testlint" + ] + }, + { + "filename": ".github/workflows/test-hack.yml", + "ignoreWords": [ + "notests", + "smallscreen" + ] + }, + { + "filename": ".github/workflows/unit-test.yaml", + "ignoreWords": [ + "notests", + "smallscreen" + ] + }, + { + "filename": ".gitignore", + "ignoreWords": [ + "MSVC", + "dylib", + "nvim", + "nvimlog", + "rustc", + "rustfmt" + ] + }, + { + "filename": ".golangci.yml", + "ignoreWords": [ + "Singleflight", + "asasalint", + "asciicheck", + "bidichk", + "bodyclose", + "contextcheck", + "copylocks", + "cyclop", + "decorder", + "depguard", + "dupl", + "dupword", + "durationcheck", + "errcheck", + "errchkjson", + "errname", + "errorlint", + "execinquery", + "exhaustruct", + "exportloopref", + "forbidigo", + "forcetypeassert", + "ginkgolinter", + "gocheckcompilerdirectives", + "gochecknoglobals", + "gochecknoinits", + 
"gocognit", + "goconst", + "gocritic", + "gocyclo", + "godox", + "gofmt", + "goheader", + "gomoddirectives", + "gomodguard", + "goprintffuncname", + "gosec", + "gosimple", + "gosmopolitan", + "govet", + "importas", + "ineffassign", + "interfacebloat", + "ireturn", + "loggercheck", + "logrlint", + "makezero", + "megacheck", + "musttag", + "nakedret", + "nestif", + "nilnil", + "nlreturn", + "nolintlint", + "nonamedreturns", + "nosprintfhostport", + "paralleltest", + "prealloc", + "predeclared", + "promlinter", + "rowserrcheck", + "singleflight", + "sqlclosecheck", + "staticcheck", + "stylecheck", + "testableexamples", + "testpackage", + "thelper", + "tparallel", + "unconvert", + "unparam", + "usestdlibvars", + "vetshadow", + "wastedassign", + "wrapcheck", + "wslissues" + ] + }, + { + "filename": ".prh.yaml", + "ignoreWords": [ + "Burstable", + "Flamegraph", + "Godoc", + "NGTD", + "besteffort", + "burstable", + "documentaion", + "flamegraph", + "godoc", + "ngtd", + "valdcli" + ] + }, + { + "filename": ".textlintrc", + "ignoreWords": [ + "AUTOBUILD", + "Autobuild", + "Godoc", + "NGTD", + "autobuild", + "godoc", + "idrequest", + "mevie", + "ngtd", + "rerank", + "sptag", + "subtest", + "vektor" + ] + }, + { + "filename": "CHANGELOG.md", + "ignoreWords": [ + "CFLAGS", + "CXXFLAGS", + "Cellebration", + "DEEPSOURCE", + "Deepsource", + "Dockerfiles", + "HADOLINT", + "Metas", + "Migratation", + "OSDK", + "PBDOCS", + "Singleflight", + "Stackdriver", + "Tutotial", + "WORKDIR", + "Workdir", + "alogrithm", + "ando", + "apiversion", + "bento", + "bidi", + "bulkinsert", + "cass", + "cheking", + "continous", + "conv", + "createindex", + "deeepsource", + "deepsource", + "depentency", + "devcontiner", + "dockerfiles", + "dotdc", + "exection", + "exhaustruct", + "exsiting", + "gache's", + "gorountine", + "hadolint", + "hrichiksite", + "informations", + "iocopy", + "junsei", + "libquadmath", + "lincense", + "liusy", + "makr", + "malloc", + "metas", + "minnum", + "multiplatforms", + "nvimlog", + "osdk", + "pacakge", + "pacicked", + "pbdocs", + "performace", + "priorityclasses", + "savedmodel", + "singleflight", + "slowloris", + "sptag", + "stackdriver", + "tada", + "takuyaymd", + "tensorlfow", + "tset", + "unkeyed", + "unneccessary", + "valdcli", + "vcache", + "vqueue's", + "workdir", + "yamls", + "ykadowak", + "zchee" + ] + }, + { + "filename": "CONTRIBUTING.md", + "ignoreWords": [ + "Firstname", + "Lastname", + "implmentes", + "newfeature" + ] + }, + { + "filename": "Makefile", + "ignoreWords": [ + "BLAS", + "CRORG", + "DBLA", + "DBUILD", + "DCMAKE", + "DFAISS", + "Dockerfiles", + "EXTLDFLAGS", + "GHCRORG", + "GOCACHE", + "GOPROXY", + "GROUNDTRUTH", + "MAKELISTS", + "NGTD", + "NPROCESSORS", + "NUMPANES", + "ONLN", + "Ofast", + "PBDOCS", + "PBGOS", + "PROTODIRS", + "PROTOS", + "RLENGTH", + "RSTART", + "STDDEV", + "armv", + "copress", + "crlfmt", + "dockerfiles", + "extldflags", + "fmerge", + "fopenmp", + "funroll", + "gitfiles", + "goproxy", + "groundtruth", + "gsub", + "laec", + "lgfortran", + "lhdf", + "libfaiss", + "llapack", + "lopenblas", + "lstdc", + "mtune", + "ncpu", + "ngtd", + "nproc", + "pbdocs", + "protos", + "pthread", + "relro", + "strictgoimports", + "toplevel" + ] + }, + { + "filename": "Makefile.d/bench.mk", + "ignoreWords": [ + "GROUNDTRUTH", + "benchmem", + "cpuprofile", + "groundtruth", + "memprofile", + "nvim" + ] + }, + { + "filename": "Makefile.d/build.mk", + "ignoreWords": [ + "EXTLDFLAGS", + "extldflags", + "linkmode", + "popd", + "pushd" + ] + }, + { + "filename": 
"Makefile.d/dependencies.mk", + "ignoreWords": [ + "GOCACHE", + "modcache", + "testcache" + ] + }, + { + "filename": "Makefile.d/docker.mk", + "ignoreWords": [ + "CRORG", + "GHCRORG", + "buildcache", + "mediatypes", + "npmjs" + ] + }, + { + "filename": "Makefile.d/e2e.mk", + "ignoreWords": [ + "ECRUD" + ] + }, + { + "filename": "Makefile.d/functions.mk", + "ignoreWords": [ + "APIV", + "EXTLDFLAGS", + "PBGOS", + "buildid", + "extldflags", + "modcacherw", + "netgo", + "osusergo", + "trimpath" + ] + }, + { + "filename": "Makefile.d/helm.mk", + "ignoreWords": [ + "valdmirrortarget", + "xzvf" + ] + }, + { + "filename": "Makefile.d/k3d.mk", + "ignoreWords": [ + "loadbalancer", + "storageclass" + ] + }, + { + "filename": "Makefile.d/k8s.mk", + "ignoreWords": [ + "CRORG", + "cainjector", + "jaegertracing", + "operatorusing", + "promtail", + "serrver" + ] + }, + { + "filename": "Makefile.d/kind.mk", + "ignoreWords": [ + "conntrack", + "netfilter" + ] + }, + { + "filename": "Makefile.d/minikube.mk", + "ignoreWords": [ + "hostpath", + "storageclass" + ] + }, + { + "filename": "Makefile.d/proto.mk", + "ignoreWords": [ + "PROTOS", + "protobufs", + "protos" + ] + }, + { + "filename": "Makefile.d/test.mk", + "ignoreWords": [ + "covermode", + "coverprofile", + "cweill", + "gotesttools", + "mfridman", + "notests", + "showteststatus" + ] + }, + { + "filename": "Makefile.d/tools.mk", + "ignoreWords": [ + "DBUILD", + "DCMAKE", + "DHDF", + "DZLIB", + "busa", + "crlfmt", + "fatih", + "gomodifytags", + "goplay", + "haya", + "honnef", + "josharian", + "libz", + "momotaro", + "mvdan", + "segmentio", + "staticcheck", + "strictgoimports", + "tlsv", + "xzvf" + ] + }, + { + "filename": "README.md", + "ignoreWords": [ + "Codacy", + "Funakoshi", + "Grimaud", + "Hiroto", + "Hrichik", + "Kadowaki", + "Kato", + "Katz", + "Kiichiro", + "Koichi", + "Kosuke", + "Mazumder", + "Morimoto", + "Okamura", + "Rintaro", + "Shiraishi", + "Siyuan", + "YUKAWA", + "Yusuke", + "aknishid", + "ando", + "datelier", + "junsei", + "kevindiu", + "liusy", + "lycorp", + "srcset", + "taisuou", + "takuyaymd", + "thedrow", + "zchee" + ] + }, + { + "filename": "apis/docs/v1/docs.md", + "ignoreWords": [ + "Bignum", + "Fixnum", + "STOCKOUT", + "Sint", + "hasn", + "sfixed", + "sint" + ] + }, + { + "filename": "apis/grpc/v1/payload/payload.pb.go", + "ignoreWords": [ + "wrapperspb" + ] + }, + { + "filename": "apis/grpc/v1/payload/payload.pb.json.go", + "ignoreWords": [ + "protojson" + ] + }, + { + "filename": "apis/grpc/v1/payload/payload_vtproto.pb.go", + "ignoreWords": [ + "Indexmapkey", + "Indexmapvalue", + "Lenmapkey", + "Lenmapvalue", + "mapkey", + "mapvalue", + "postmsg", + "protohelpers", + "vtmsg", + "vtpb", + "wiretype", + "wrapperspb" + ] + }, + { + "filename": "apis/grpc/v1/rpc/errdetails/error_details.pb.go", + "ignoreWords": [ + "STOCKOUT", + "durationpb" + ] + }, + { + "filename": "apis/grpc/v1/rpc/errdetails/error_details.pb.json.go", + "ignoreWords": [ + "protojson" + ] + }, + { + "filename": "apis/grpc/v1/rpc/errdetails/error_details_vtproto.pb.go", + "ignoreWords": [ + "Indexmapkey", + "Indexmapvalue", + "Lenmapkey", + "Lenmapvalue", + "durationpb", + "mapkey", + "mapvalue", + "protohelpers", + "wiretype" + ] + }, + { + "filename": "apis/proto/v1/agent/core/agent.proto", + "ignoreWords": [ + "createandsave" + ] + }, + { + "filename": "apis/proto/v1/payload/payload.proto", + "ignoreWords": [ + "objc" + ] + }, + { + "filename": "apis/proto/v1/rpc/errdetails/error_details.proto", + "ignoreWords": [ + "STOCKOUT", + "objc" + ] + }, + { + 
"filename": "apis/swagger/v1/agent/core/agent.swagger.json", + "ignoreWords": [ + "createandsave" + ] + }, + { + "filename": "assets/test/templates/common/fill.tmpl", + "ignoreWords": [ + "uintptr" + ] + }, + { + "filename": "assets/test/templates/common/function.tmpl", + "ignoreWords": [ + "Subtests", + "subtests" + ] + }, + { + "filename": "assets/test/templates/option/function.tmpl", + "ignoreWords": [ + "Subtests", + "subtests" + ] + }, + { + "filename": "buf.gen.yaml", + "ignoreWords": [ + "mfridman", + "neoeinstein", + "openapiv", + "pseudomuto" + ] + }, + { + "filename": "charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml", + "ignoreWords": [ + "vbjs" + ] + }, + { + "filename": "charts/vald-benchmark-operator/crds/valdbenchmarkoperatorrelease.yaml", + "ignoreWords": [ + "valdbenchmarkoperator", + "valdbenchmarkoperatorreleases", + "vbor", + "vbors" + ] + }, + { + "filename": "charts/vald-benchmark-operator/crds/valdbenchmarkscenario.yaml", + "ignoreWords": [ + "vbss" + ] + }, + { + "filename": "charts/vald-benchmark-operator/templates/clusterrole.yaml", + "ignoreWords": [ + "deletecollection" + ] + }, + { + "filename": "charts/vald-helm-operator/README.md", + "ignoreWords": [ + "readyz" + ] + }, + { + "filename": "charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml", + "ignoreWords": [ + "vhors" + ] + }, + { + "filename": "charts/vald-helm-operator/templates/clusterrole.yaml", + "ignoreWords": [ + "clusterrolebindings", + "clusterroles", + "customresourcedefinitions", + "horizontalpodautoscalers", + "networkpolicies", + "persistentvolumeclaims", + "poddisruptionbudgets", + "priorityclasses", + "serviceaccounts", + "statefulsets" + ] + }, + { + "filename": "charts/vald-helm-operator/values.yaml", + "ignoreWords": [ + "readyz" + ] + }, + { + "filename": "charts/vald-readreplica/Chart.yaml", + "ignoreWords": [ + "ykadowak" + ] + }, + { + "filename": "charts/vald-readreplica/templates/deployment.yaml", + "ignoreWords": [ + "valdchart", + "valdname" + ] + }, + { + "filename": "charts/vald-readreplica/templates/hpa.yaml", + "ignoreWords": [ + "valdchart", + "valdname" + ] + }, + { + "filename": "charts/vald-readreplica/templates/svc.yaml", + "ignoreWords": [ + "valdchart", + "valdname" + ] + }, + { + "filename": "charts/vald/README.md", + "ignoreWords": [ + "goroutines" + ] + }, + { + "filename": "charts/vald/crds/valdmirrortarget.yaml", + "ignoreWords": [ + "valdmirrortarget", + "valdmirrortargets", + "vmts" + ] + }, + { + "filename": "charts/vald/templates/_helpers.tpl", + "ignoreWords": [ + "envkey", + "rsslimit", + "vszlimit" + ] + }, + { + "filename": "charts/vald/templates/gateway/mirror/clusterrole.yaml", + "ignoreWords": [ + "valdmirrortargets" + ] + }, + { + "filename": "charts/vald/templates/index/job/readreplica/rotate/clusterrole.yaml", + "ignoreWords": [ + "persistentvolumeclaims" + ] + }, + { + "filename": "charts/vald/templates/index/job/readreplica/rotate/configmap.yaml", + "ignoreWords": [ + "envkey" + ] + }, + { + "filename": "charts/vald/values.schema.json", + "ignoreWords": [ + "goroutines" + ] + }, + { + "filename": "charts/vald/values.yaml", + "ignoreWords": [ + "goroutines" + ] + }, + { + "filename": "cmd/tools/cli/benchmark/core/main.go", + "ignoreWords": [ + "pfile", + "vmdata", + "vmexe", + "vmlib", + "vmlock", + "vmpin", + "vmpte", + "vmstack", + "vmswap" + ] + }, + { + "filename": "dockers/binfmt/Dockerfile", + "ignoreWords": [ + "tonistiigi" + ] + }, + { + "filename": "dockers/ci/base/Dockerfile", + "ignoreWords": [ + "graphviz" + ] + }, + { + 
"filename": "dockers/ci/base/README.md", + "ignoreWords": [ + "titile" + ] + }, + { + "filename": "dockers/dev/Dockerfile", + "ignoreWords": [ + "gomodifytags", + "graphviz", + "staticcheck" + ] + }, + { + "filename": "docs/api/build_proto.md", + "ignoreWords": [ + "PROTOS", + "chrono", + "nanos", + "protos", + "rustc" + ] + }, + { + "filename": "docs/contributing/coding-style.md", + "ignoreWords": [ + "Godoc", + "Roundtripper", + "Structs", + "Subtests", + "crlfmt", + "godoc", + "httputil", + "ioutil", + "roundtripper", + "structs", + "subtests" + ] + }, + { + "filename": "docs/overview/about-vald.md", + "ignoreWords": [ + "rebalancing", + "rerank" + ] + }, + { + "filename": "docs/overview/component/agent.md", + "ignoreWords": [ + "verctors" + ] + }, + { + "filename": "docs/overview/component/discoverer.md", + "ignoreWords": [ + "nodeby" + ] + }, + { + "filename": "docs/performance/continuous-benchmark.md", + "ignoreWords": [ + "vbor" + ] + }, + { + "filename": "docs/performance/loadtest.md", + "ignoreWords": [ + "GOMAXPROCS", + "maxprocs", + "streaminsert" + ] + }, + { + "filename": "docs/tutorial/get-started-with-faiss-agent.md", + "ignoreWords": [ + "cvspq", + "jrnlw" + ] + }, + { + "filename": "docs/tutorial/get-started.md", + "ignoreWords": [ + "cvspq", + "getstarted", + "jrnlw", + "loadbalancer" + ] + }, + { + "filename": "docs/tutorial/vald-agent-standalone-on-docker.md", + "ignoreWords": [ + "GOMAXPROCS", + "maxprocs" + ] + }, + { + "filename": "docs/tutorial/vald-multicluster-on-k8s.md", + "ignoreWords": [ + "brbsp", + "dnxbb", + "ghlpx", + "gzcr", + "hbklj", + "kgrdf", + "multicluster", + "vjbqx", + "vlmpg", + "wtlcv", + "xmws" + ] + }, + { + "filename": "docs/usecase/usage-example.md", + "ignoreWords": [ + "vectorizing" + ] + }, + { + "filename": "docs/user-guides/backup-configuration.md", + "ignoreWords": [ + "ACCESSS" + ] + }, + { + "filename": "docs/user-guides/capacity-planning.md", + "ignoreWords": [ + "Burstable", + "burstable" + ] + }, + { + "filename": "docs/user-guides/client-api-config.md", + "ignoreWords": [ + "Milli", + "achive", + "rerank" + ] + }, + { + "filename": "docs/user-guides/cluster-role-binding.md", + "ignoreWords": [ + "clusterrolebinding", + "finalizers", + "retrive", + "valdmirrortargets" + ] + }, + { + "filename": "docs/user-guides/deployment.md", + "ignoreWords": [ + "finalizers", + "valdhelmopratorreleases" + ] + }, + { + "filename": "example/helm/values-standalone-agent-ngt.yaml", + "ignoreWords": [ + "mnist's" + ] + }, + { + "filename": "example/helm/values-with-pyroscope.yaml", + "ignoreWords": [ + "serversscheme" + ] + }, + { + "filename": "example/helm/values.yaml", + "ignoreWords": [ + "Agnet", + "mnist's", + "serversscheme" + ] + }, + { + "filename": "example/manifest/scylla/configmap.yaml", + "ignoreWords": [ + "initdb" + ] + }, + { + "filename": "example/manifest/scylla/job.yaml", + "ignoreWords": [ + "cqlsh", + "initdb" + ] + }, + { + "filename": "go.mod", + "ignoreWords": [ + "adal", + "afero", + "ajstarks", + "amqp", + "ansiterm", + "antihax", + "appengine", + "armon", + "astcopy", + "astequal", + "autorest", + "azcore", + "azidentity", + "benbjohnson", + "beorn", + "blackfriday", + "bmizerany", + "boombuler", + "buger", + "bytefmt", + "campoy", + "cenkalti", + "cespare", + "chunkreader", + "chzyer", + "cloudfoundry", + "cloudsql", + "cmdflag", + "colorstring", + "configsources", + "cpuguy", + "cpuid", + "creack", + "davecgh", + "dbus", + "dejavu", + "demangle", + "denisenkom", + "devigned", + "dgryski", + "difflib", + "diskv", + 
"dnaeon", + "easyjson", + "embedmd", + "emicklei", + "errcheck", + "evanphx", + "eventstream", + "fastuuid", + "felixge", + "fgprof", + "filippo", + "firestore", + "flowrate", + "fogleman", + "fortytw", + "fpdf", + "frankban", + "freetype", + "glfw", + "goautoneg", + "gobwas", + "godbus", + "godebug", + "godeltaprof", + "gofpdf", + "gofpdi", + "gofrs", + "gofuzz", + "gogrep", + "gojsonpointer", + "gojsonreference", + "gojsonschema", + "gomega", + "gomodules", + "gonic", + "gostub", + "gotool", + "gover", + "gregjones", + "groupcache", + "grpcreplay", + "hailocab", + "hanwen", + "honnef", + "httpcache", + "httpfs", + "httphead", + "httpreplay", + "httpsnoop", + "iancoleman", + "ianlancetaylor", + "imdario", + "imds", + "inconshreveable", + "isatty", + "jackc", + "jessevdk", + "jmespath", + "jmoiron", + "joho", + "josharian", + "jsonparser", + "jsonpointer", + "jsonreference", + "jstemmer", + "kisielk", + "kyaml", + "kylelemons", + "leaktest", + "leodido", + "liggitt", + "logex", + "logfmt", + "logr", + "lucasb", + "mailru", + "mattn", + "matttproud", + "mergo", + "mitchellh", + "modocache", + "monochromegane", + "montanaflynn", + "mountinfo", + "mssqldb", + "munnerz", + "nhooyr", + "niemeyer", + "nxadm", + "objx", + "ocsql", + "onsi", + "otelhttp", + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "peterbourgon", + "pflag", + "pgio", + "pgmock", + "pgpassfile", + "pgproto", + "pgservicefile", + "pgtype", + "phpdave", + "pierrec", + "pmezard", + "prashantv", + "progressbar", + "quicktest", + "ratelimit", + "replayers", + "rogpeppe", + "russross", + "ruudk", + "sbinet", + "schollz", + "secretmanager", + "sergi", + "shlex", + "shopspring", + "shurcoo", + "sirupsen", + "spdystream", + "sqlexp", + "sqlmock", + "sqlx", + "starlark", + "stdinfo", + "stdr", + "stix", + "stoewer", + "strcase", + "strparse", + "tabwriter", + "toolsmith", + "treeprint", + "typeparams", + "udpa", + "ugorji", + "vfsgen", + "xeipuuv", + "xlab", + "xxhash", + "zapr" + ] + }, + { + "filename": "hack/benchmark/assets/x1b/loader.go", + "ignoreWords": [ + "fname" + ] + }, + { + "filename": "hack/benchmark/assets/x1b/loader_test.go", + "ignoreWords": [ + "fname" + ] + }, + { + "filename": "hack/benchmark/internal/db/nosql/cassandra/cassandra_test.go", + "ignoreWords": [ + "Metas", + "metas" + ] + }, + { + "filename": "hack/benchmark/internal/db/nosql/cassandra/testdata.json", + "ignoreWords": [ + "Adipisicing", + "Aliqua", + "Aliquip", + "Amet", + "Aute", + "Cillum", + "Commodo", + "Consequat", + "Cupidatat", + "Deserunt", + "Dolore", + "Duis", + "Eiusmod", + "Elit", + "Enim", + "Excepteur", + "Fugiat", + "Incididunt", + "Irure", + "Labore", + "Laboris", + "Laborum", + "Mollit", + "Nostrud", + "Nulla", + "Occaecat", + "Officia", + "Pariatur", + "Proident", + "Quis", + "Reprehenderit", + "Sint", + "Sunt", + "Tempor", + "Ullamco", + "Velit", + "Veniam", + "Voluptate", + "adipisicing", + "aliqua", + "aliquip", + "amet", + "aute", + "cillum", + "commodo", + "consequat", + "cupidatat", + "deserunt", + "dolore", + "duis", + "eiusmod", + "elit", + "enim", + "excepteur", + "fugiat", + "incididunt", + "irure", + "labore", + "laboris", + "laborum", + "mollit", + "nostrud", + "nulla", + "occaecat", + "officia", + "pariatur", + "proident", + "quis", + "reprehenderit", + "sint", + "sunt", + "tempor", + "ullamco", + "velit", + "veniam", + "voluptate" + ] + }, + { + "filename": "hack/benchmark/internal/starter/agent/core/ngt/option.go", + "ignoreWords": [ + "dtype", + "otype" + ] + }, + { + "filename": 
"hack/benchmark/src/singleflight/singleflight_bench_test.go", + "ignoreWords": [ + "durs", + "resultsmap", + "singlefligh", + "stdsingleflight" + ] + }, + { + "filename": "hack/docker/gen/main.go", + "ignoreWords": [ + "Inernal", + "TARGETOS", + "WORKDIR", + "Workdir", + "epkg", + "gomodifytags", + "graphviz", + "tmpl", + "tonistiigi", + "workdir" + ] + }, + { + "filename": "hack/go.mod.default", + "ignoreWords": [ + "adal", + "afero", + "ajstarks", + "amqp", + "antihax", + "appengine", + "armon", + "astcopy", + "astequal", + "autorest", + "azcore", + "azidentity", + "benbjohnson", + "beorn", + "blackfriday", + "bmizerany", + "boombuler", + "buger", + "bytefmt", + "cenkalti", + "cespare", + "chunkreader", + "chzyer", + "cloudfoundry", + "cloudsql", + "cmdflag", + "colorstring", + "configsources", + "cpuguy", + "cpuid", + "creack", + "davecgh", + "dbus", + "dejavu", + "demangle", + "denisenkom", + "devigned", + "dgryski", + "difflib", + "diskv", + "dnaeon", + "easyjson", + "emicklei", + "errcheck", + "evanphx", + "eventstream", + "fastuuid", + "firestore", + "fogleman", + "fortytw", + "fpdf", + "frankban", + "freetype", + "glfw", + "goautoneg", + "gobwas", + "godbus", + "godebug", + "godeltaprof", + "gofpdf", + "gofpdi", + "gofrs", + "gofuzz", + "gogrep", + "gojsonpointer", + "gojsonreference", + "gojsonschema", + "gomega", + "gomodules", + "gonic", + "gostub", + "gotool", + "gover", + "gregjones", + "groupcache", + "grpcreplay", + "hailocab", + "hanwen", + "honnef", + "httpcache", + "httpfs", + "httphead", + "httpreplay", + "iancoleman", + "ianlancetaylor", + "imds", + "inconshreveable", + "isatty", + "jackc", + "jessevdk", + "jmespath", + "jmoiron", + "joho", + "josharian", + "jsonparser", + "jsonpointer", + "jsonreference", + "jstemmer", + "kisielk", + "kylelemons", + "leaktest", + "leodido", + "liggitt", + "logex", + "logfmt", + "logr", + "lucasb", + "mailru", + "mattn", + "matttproud", + "mitchellh", + "modocache", + "monochromegane", + "montanaflynn", + "mountinfo", + "mssqldb", + "munnerz", + "nhooyr", + "niemeyer", + "nxadm", + "objx", + "ocsql", + "onsi", + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "peterbourgon", + "pflag", + "pgio", + "pgmock", + "pgpassfile", + "pgproto", + "pgservicefile", + "pgtype", + "phpdave", + "pierrec", + "pmezard", + "prashantv", + "progressbar", + "quicktest", + "replayers", + "rogpeppe", + "russross", + "ruudk", + "sbinet", + "schollz", + "secretmanager", + "sergi", + "shlex", + "shopspring", + "shurcoo", + "sirupsen", + "spdystream", + "sqlexp", + "sqlmock", + "sqlx", + "starlark", + "stdinfo", + "stdr", + "stix", + "stoewer", + "strcase", + "strparse", + "tabwriter", + "toolsmith", + "treeprint", + "typeparams", + "udpa", + "ugorji", + "vfsgen", + "xeipuuv", + "xlab", + "xxhash", + "zapr" + ] + }, + { + "filename": "hack/gorules/rules_test.go", + "ignoreWords": [ + "analysistest" + ] + }, + { + "filename": "hack/gorules/testdata/tests.go", + "ignoreWords": [ + "Fmts", + "newname" + ] + }, + { + "filename": "hack/license/gen/main.go", + "ignoreWords": [ + "Pipefile", + "contributorsrc", + "dirwalk", + "gitmodules", + "helmignore", + "tmpl", + "webp", + "whitesource" + ] + }, + { + "filename": "hack/tools/metrics/main.go", + "ignoreWords": [ + "lucasb", + "vgsvg" + ] + }, + { + "filename": "internal/backoff/backoff_test.go", + "ignoreWords": [ + "Timelimit", + "timelimit" + ] + }, + { + "filename": "internal/circuitbreaker/breaker_test.go", + "ignoreWords": [ + "resetted" + ] + }, + { + "filename": "internal/client/v1/client/client.go", + 
"ignoreWords": [ + "Upsertor" + ] + }, + { + "filename": "internal/client/v1/client/discoverer/discover_test.go", + "ignoreWords": [ + "copylocks", + "govet" + ] + }, + { + "filename": "internal/compress/gzip_option_test.go", + "ignoreWords": [ + "zdtd" + ] + }, + { + "filename": "internal/compress/lz4/lz4.go", + "ignoreWords": [ + "pierrec" + ] + }, + { + "filename": "internal/compress/zstd_option_test.go", + "ignoreWords": [ + "zdtd" + ] + }, + { + "filename": "internal/compress/zstd_test.go", + "ignoreWords": [ + "decom", + "vecotr" + ] + }, + { + "filename": "internal/config/backup_test.go", + "ignoreWords": [ + "healthcheck" + ] + }, + { + "filename": "internal/config/blob.go", + "ignoreWords": [ + "storaget" + ] + }, + { + "filename": "internal/config/blob_test.go", + "ignoreWords": [ + "CLOUDSTORAGECONFIG" + ] + }, + { + "filename": "internal/config/cassandra.go", + "ignoreWords": [ + "TLSCA" + ] + }, + { + "filename": "internal/config/cassandra_test.go", + "ignoreWords": [ + "localserial" + ] + }, + { + "filename": "internal/config/compress_test.go", + "ignoreWords": [ + "COMPRESSCORE", + "COMPRESSORREGISTERER" + ] + }, + { + "filename": "internal/config/config.go", + "ignoreWords": [ + "dnum", + "rdst", + "snum", + "vdst" + ] + }, + { + "filename": "internal/config/config_test.go", + "ignoreWords": [ + "GETACTUALVALUE", + "GETACTUALVALUES", + "GLOBALCONFIG", + "fname" + ] + }, + { + "filename": "internal/config/faiss.go", + "ignoreWords": [ + "Voronoi", + "subquantizers", + "subvector" + ] + }, + { + "filename": "internal/config/filter_test.go", + "ignoreWords": [ + "sufix" + ] + }, + { + "filename": "internal/config/gateway_test.go", + "ignoreWords": [ + "bmanager", + "efilter", + "ireplica" + ] + }, + { + "filename": "internal/config/grpc.go", + "ignoreWords": [ + "Dail" + ] + }, + { + "filename": "internal/config/grpc_test.go", + "ignoreWords": [ + "DIALOPTION", + "GRPCCLIENT", + "GRPCCLIENTKEEPALIVE", + "healthcheck" + ] + }, + { + "filename": "internal/config/observability_test.go", + "ignoreWords": [ + "servicename" + ] + }, + { + "filename": "internal/config/redis_test.go", + "ignoreWords": [ + "Timelimit", + "timelimit" + ] + }, + { + "filename": "internal/config/server_test.go", + "ignoreWords": [ + "GPRC", + "GRPCKEEPALIVE" + ] + }, + { + "filename": "internal/config/sidecar_test.go", + "ignoreWords": [ + "AGENTSIDECAR" + ] + }, + { + "filename": "internal/conv/conv.go", + "ignoreWords": [ + "Atobs" + ] + }, + { + "filename": "internal/core/algorithm/faiss/Capi.cpp", + "ignoreWords": [ + "IVFPQ", + "Quantizer", + "quantizer", + "xids" + ] + }, + { + "filename": "internal/core/algorithm/faiss/Capi.h", + "ignoreWords": [ + "Quantizer", + "quantizer", + "xids" + ] + }, + { + "filename": "internal/core/algorithm/faiss/faiss.go", + "ignoreWords": [ + "lfaiss", + "ntotal", + "strage", + "xids" + ] + }, + { + "filename": "internal/core/algorithm/faiss/option.go", + "ignoreWords": [ + "lfaiss" + ] + }, + { + "filename": "internal/core/algorithm/ngt/Makefile", + "ignoreWords": [ + "benchmem" + ] + }, + { + "filename": "internal/core/algorithm/ngt/ngt.go", + "ignoreWords": [ + "bulkinsert", + "bulkremove", + "cstats", + "lngt", + "ospace", + "stdlib" + ] + }, + { + "filename": "internal/core/algorithm/ngt/ngt_test.go", + "ignoreWords": [ + "bulkinsert", + "ospace" + ] + }, + { + "filename": "internal/core/algorithm/ngt/option.go", + "ignoreWords": [ + "dotp", + "dproduct", + "halffloat", + "innerp", + "iproduct", + "lngt", + "nang", + "nangle", + "ncos", + "ncosine", + 
"normalizedang", + "normalizedcos", + "sparsejac" + ] + }, + { + "filename": "internal/core/algorithm/ngt/option_test.go", + "ignoreWords": [ + "nang", + "ncos" + ] + }, + { + "filename": "internal/db/kvs/bbolt/bbolt_test.go", + "ignoreWords": [ + "testfunc" + ] + }, + { + "filename": "internal/db/kvs/bbolt/option.go", + "ignoreWords": [ + "Freelist" + ] + }, + { + "filename": "internal/db/kvs/bbolt/option_test.go", + "ignoreWords": [ + "Freelist" + ] + }, + { + "filename": "internal/db/kvs/pogreb/options.go", + "ignoreWords": [ + "deafult" + ] + }, + { + "filename": "internal/db/kvs/pogreb/pogreb.go", + "ignoreWords": [ + "deafult" + ] + }, + { + "filename": "internal/db/kvs/redis/delete.go", + "ignoreWords": [ + "Deleter" + ] + }, + { + "filename": "internal/db/kvs/redis/hook.go", + "ignoreWords": [ + "Cmder" + ] + }, + { + "filename": "internal/db/kvs/redis/option_test.go", + "ignoreWords": [ + "defult" + ] + }, + { + "filename": "internal/db/kvs/redis/redis.go", + "ignoreWords": [ + "Deleter", + "Pipeliner" + ] + }, + { + "filename": "internal/db/kvs/redis/redis_mock.go", + "ignoreWords": [ + "Cmder", + "Pipeliner" + ] + }, + { + "filename": "internal/db/kvs/redis/redis_test.go", + "ignoreWords": [ + "cslots", + "gotc" + ] + }, + { + "filename": "internal/db/nosql/cassandra/cassandra.go", + "ignoreWords": [ + "Queryx", + "cmps", + "configuation", + "wlhf" + ] + }, + { + "filename": "internal/db/nosql/cassandra/cassandra_test.go", + "ignoreWords": [ + "Debouncer", + "Queryx", + "cmps", + "dchf", + "selete", + "unavilable", + "wlhf" + ] + }, + { + "filename": "internal/db/nosql/cassandra/delete.go", + "ignoreWords": [ + "Deleter" + ] + }, + { + "filename": "internal/db/nosql/cassandra/option.go", + "ignoreWords": [ + "TLSCA", + "eachquorum", + "localone", + "localquorum", + "localserial" + ] + }, + { + "filename": "internal/db/nosql/cassandra/option_test.go", + "ignoreWords": [ + "TLSCA" + ] + }, + { + "filename": "internal/db/rdb/mysql/mysql_test.go", + "ignoreWords": [ + "insertbysql", + "loadcontext" + ] + }, + { + "filename": "internal/db/rdb/mysql/option_test.go", + "ignoreWords": [ + "valddb", + "valdmysql" + ] + }, + { + "filename": "internal/db/storage/blob/cloudstorage/cloudstorage.go", + "ignoreWords": [ + "iblob" + ] + }, + { + "filename": "internal/db/storage/blob/cloudstorage/cloudstorage_test.go", + "ignoreWords": [ + "iblob" + ] + }, + { + "filename": "internal/db/storage/blob/cloudstorage/option.go", + "ignoreWords": [ + "urlstr" + ] + }, + { + "filename": "internal/db/storage/blob/s3/reader/reader_test.go", + "ignoreWords": [ + "roop" + ] + }, + { + "filename": "internal/db/storage/blob/s3/sdk/s3/s3manager/s3manager.go", + "ignoreWords": [ + "mngr" + ] + }, + { + "filename": "internal/db/storage/blob/s3/session/session_test.go", + "ignoreWords": [ + "btop", + "forcepathstyle", + "httpclient", + "itop", + "maxretries" + ] + }, + { + "filename": "internal/errors/benchmark.go", + "ignoreWords": [ + "benchjob", + "benchscenario", + "tbenchjob", + "tbenchscenario" + ] + }, + { + "filename": "internal/errors/cassandra.go", + "ignoreWords": [ + "consistetncy", + "tcql" + ] + }, + { + "filename": "internal/errors/cassandra_test.go", + "ignoreWords": [ + "consistetncy", + "tcql" + ] + }, + { + "filename": "internal/errors/circuitbreaker.go", + "ignoreWords": [ + "errstr" + ] + }, + { + "filename": "internal/errors/compressor.go", + "ignoreWords": [ + "registerers" + ] + }, + { + "filename": "internal/errors/compressor_test.go", + "ignoreWords": [ + "leve", + "registerers" + ] + 
}, + { + "filename": "internal/errors/errors_test.go", + "ignoreWords": [ + "Unwarp", + "uncomparable", + "unwrapd" + ] + }, + { + "filename": "internal/errors/file.go", + "ignoreWords": [ + "fitos" + ] + }, + { + "filename": "internal/errors/file_test.go", + "ignoreWords": [ + "fitos" + ] + }, + { + "filename": "internal/errors/lb.go", + "ignoreWords": [ + "Insuffcient" + ] + }, + { + "filename": "internal/errors/mysql_test.go", + "ignoreWords": [ + "vaef" + ] + }, + { + "filename": "internal/errors/redis.go", + "ignoreWords": [ + "KVVK" + ] + }, + { + "filename": "internal/errors/redis_test.go", + "ignoreWords": [ + "KVVK" + ] + }, + { + "filename": "internal/errors/vald_test.go", + "ignoreWords": [ + "tvald" + ] + }, + { + "filename": "internal/file/file_test.go", + "ignoreWords": [ + "utiltest" + ] + }, + { + "filename": "internal/info/info.go", + "ignoreWords": [ + "procs", + "strs" + ] + }, + { + "filename": "internal/k8s/client/client.go", + "ignoreWords": [ + "applyconfigurations", + "applycorev", + "clientgoscheme", + "snapshotv", + "volumesnapshot" + ] + }, + { + "filename": "internal/k8s/job/job.go", + "ignoreWords": [ + "batchv" + ] + }, + { + "filename": "internal/k8s/option.go", + "ignoreWords": [ + "mertics" + ] + }, + { + "filename": "internal/k8s/reconciler.go", + "ignoreWords": [ + "mertics", + "mserver" + ] + }, + { + "filename": "internal/k8s/reconciler_test.go", + "ignoreWords": [ + "mertics" + ] + }, + { + "filename": "internal/k8s/types.go", + "ignoreWords": [ + "appsv", + "batchv", + "snapshotv", + "volumesnapshot" + ] + }, + { + "filename": "internal/k8s/vald/benchmark/api/v1/job_types.go", + "ignoreWords": [ + "deepcopy" + ] + }, + { + "filename": "internal/k8s/vald/benchmark/api/v1/scenario_types.go", + "ignoreWords": [ + "deepcopy" + ] + }, + { + "filename": "internal/k8s/vald/mirror/api/v1/target_types.go", + "ignoreWords": [ + "deepcopy" + ] + }, + { + "filename": "internal/log/format/format.go", + "ignoreWords": [ + "LTSV", + "ltsv" + ] + }, + { + "filename": "internal/log/format/format_test.go", + "ignoreWords": [ + "LTSV", + "ltsv" + ] + }, + { + "filename": "internal/log/glg/glg.go", + "ignoreWords": [ + "DEBG", + "dstr" + ] + }, + { + "filename": "internal/log/glg/glg_test.go", + "ignoreWords": [ + "DEBG" + ] + }, + { + "filename": "internal/log/level/level.go", + "ignoreWords": [ + "DEBG", + "ERRO", + "FATA" + ] + }, + { + "filename": "internal/log/logger/iface.go", + "ignoreWords": [ + "finalizer" + ] + }, + { + "filename": "internal/log/logger/type.go", + "ignoreWords": [ + "Atot" + ] + }, + { + "filename": "internal/log/logger/type_test.go", + "ignoreWords": [ + "Atot", + "ZEROL" + ] + }, + { + "filename": "internal/log/nop/nop.go", + "ignoreWords": [ + "finalizer" + ] + }, + { + "filename": "internal/log/option.go", + "ignoreWords": [ + "Atot" + ] + }, + { + "filename": "internal/log/retry/retry_test.go", + "ignoreWords": [ + "foramt", + "gotr", + "wantr" + ] + }, + { + "filename": "internal/net/control/control.go", + "ignoreWords": [ + "boolint" + ] + }, + { + "filename": "internal/net/control/control_test.go", + "ignoreWords": [ + "boolint" + ] + }, + { + "filename": "internal/net/control/control_unix.go", + "ignoreWords": [ + "uapi" + ] + }, + { + "filename": "internal/net/dialer.go", + "ignoreWords": [ + "nport", + "tconnectionstate", + "tder" + ] + }, + { + "filename": "internal/net/dialer_test.go", + "ignoreWords": [ + "Nums", + "copylocks", + "govet" + ] + }, + { + "filename": "internal/net/grpc/client.go", + "ignoreWords": [ + "gbackoff", + 
"parseable", + "rebalancing" + ] + }, + { + "filename": "internal/net/grpc/client_test.go", + "ignoreWords": [ + "gbackoff" + ] + }, + { + "filename": "internal/net/grpc/errdetails/errdetails.go", + "ignoreWords": [ + "iobjs" + ] + }, + { + "filename": "internal/net/grpc/logger/logger.go", + "ignoreWords": [ + "Errorln", + "Infoln", + "Warningf", + "Warningln", + "grpclog" + ] + }, + { + "filename": "internal/net/grpc/logger/logger_test.go", + "ignoreWords": [ + "Errorln", + "Infoln", + "Warningf", + "Warningln", + "grpclog" + ] + }, + { + "filename": "internal/net/grpc/option.go", + "ignoreWords": [ + "gbackoff", + "metricinterceptor", + "traceinterceptor" + ] + }, + { + "filename": "internal/net/grpc/pool/pool.go", + "ignoreWords": [ + "tdelay" + ] + }, + { + "filename": "internal/net/grpc/proto/proto.go", + "ignoreWords": [ + "protoiface" + ] + }, + { + "filename": "internal/net/grpc/server_test.go", + "ignoreWords": [ + "channelz" + ] + }, + { + "filename": "internal/net/http/client/option.go", + "ignoreWords": [ + "Alives", + "Keepalives" + ] + }, + { + "filename": "internal/net/http/client/option_test.go", + "ignoreWords": [ + "Alives", + "Keepalives" + ] + }, + { + "filename": "internal/net/http/dump/dump_test.go", + "ignoreWords": [ + "hoge" + ] + }, + { + "filename": "internal/net/http/metrics/pprof.go", + "ignoreWords": [ + "felixge", + "fgprof", + "godeltaprof", + "pyprof", + "threadcreate" + ] + }, + { + "filename": "internal/net/http/middleware/timeout_test.go", + "ignoreWords": [ + "extermemly" + ] + }, + { + "filename": "internal/net/http/transport/roundtrip.go", + "ignoreWords": [ + "Roundtripper", + "roundtripper" + ] + }, + { + "filename": "internal/net/net.go", + "ignoreWords": [ + "IGMP", + "hostport", + "igmp" + ] + }, + { + "filename": "internal/net/net_test.go", + "ignoreWords": [ + "IGMP", + "hostport", + "igmp" + ] + }, + { + "filename": "internal/observability/exporter/otlp/otlp.go", + "ignoreWords": [ + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "semconv" + ] + }, + { + "filename": "internal/observability/metrics/grpc/grpc.go", + "ignoreWords": [ + "Desctiption" + ] + }, + { + "filename": "internal/observability/metrics/mem/index/index.go", + "ignoreWords": [ + "mstats" + ] + }, + { + "filename": "internal/observability/metrics/mem/mem.go", + "ignoreWords": [ + "Memstats", + "Shmem", + "buckhash", + "mcache", + "mspan", + "oinsts", + "shmem", + "toal", + "vmdata", + "vmexe", + "vmlck", + "vmlib", + "vmpin", + "vmpte", + "vmstk", + "vmswap" + ] + }, + { + "filename": "internal/observability/metrics/mem/mem_test.go", + "ignoreWords": [ + "Memstats" + ] + }, + { + "filename": "internal/observability/trace/status.go", + "ignoreWords": [ + "RPCGRPC", + "ocodes", + "semconv" + ] + }, + { + "filename": "internal/params/params.go", + "ignoreWords": [ + "commnad" + ] + }, + { + "filename": "internal/runner/runner.go", + "ignoreWords": [ + "maxprocs", + "mfunc", + "timelocation" + ] + }, + { + "filename": "internal/safety/safety.go", + "ignoreWords": [ + "revcover", + "runtimer" + ] + }, + { + "filename": "internal/servers/option.go", + "ignoreWords": [ + "strg" + ] + }, + { + "filename": "internal/servers/option_test.go", + "ignoreWords": [ + "gsrv", + "strg" + ] + }, + { + "filename": "internal/servers/server/option.go", + "ignoreWords": [ + "accesslog", + "accessloginterceptor", + "metricinterceptor", + "recoverinterceptor", + "traceinterceptor" + ] + }, + { + "filename": "internal/servers/server/server_test.go", + "ignoreWords": [ + "prestart" + ] + }, + { 
+ "filename": "internal/servers/servers_test.go", + "ignoreWords": [ + "strg" + ] + }, + { + "filename": "internal/strings/strings_benchmark_test.go", + "ignoreWords": [ + "tstr" + ] + }, + { + "filename": "internal/sync/errgroup/group_test.go", + "ignoreWords": [ + "acquireings", + "goroutne" + ] + }, + { + "filename": "internal/sync/semaphore/semaphore.go", + "ignoreWords": [ + "cancelation" + ] + }, + { + "filename": "internal/sync/semaphore/semaphore_example_test.go", + "ignoreWords": [ + "Collatz", + "collatz", + "nonpositive" + ] + }, + { + "filename": "internal/sync/semaphore/semaphore_test.go", + "ignoreWords": [ + "Doesnt", + "unacquired" + ] + }, + { + "filename": "internal/sync/singleflight/singleflight.go", + "ignoreWords": [ + "chans", + "dups" + ] + }, + { + "filename": "internal/sync/singleflight/singleflight_test.go", + "ignoreWords": [ + "DOCHAN", + "unparam" + ] + }, + { + "filename": "internal/test/data/hdf5/hdf5.go", + "ignoreWords": [ + "Keepalives", + "Neighors" + ] + }, + { + "filename": "internal/test/data/hdf5/option.go", + "ignoreWords": [ + "dataname" + ] + }, + { + "filename": "internal/test/data/vector/gen.go", + "ignoreWords": [ + "irand" + ] + }, + { + "filename": "internal/test/mock/grpc_testify_mock.go", + "ignoreWords": [ + "losm", + "usecases" + ] + }, + { + "filename": "internal/test/mock/k8s/client.go", + "ignoreWords": [ + "crclient" + ] + }, + { + "filename": "internal/timeutil/rate/rate.go", + "ignoreWords": [ + "ratelimit" + ] + }, + { + "filename": "internal/timeutil/rate/rate_test.go", + "ignoreWords": [ + "ratelimit" + ] + }, + { + "filename": "internal/timeutil/time_test.go", + "ignoreWords": [ + "dummystring", + "hoge" + ] + }, + { + "filename": "internal/unit/unit.go", + "ignoreWords": [ + "bytefmt", + "cloudfoundry" + ] + }, + { + "filename": "internal/version/version.go", + "ignoreWords": [ + "curv" + ] + }, + { + "filename": "internal/worker/worker_test.go", + "ignoreWords": [ + "testname" + ] + }, + { + "filename": "k8s/external/minio/deployment.yaml", + "ignoreWords": [ + "ACCESSKEY", + "SECRETKEY" + ] + }, + { + "filename": "k8s/external/minio/mb-job.yaml", + "ignoreWords": [ + "ACCESSKEY", + "SECRETKEY" + ] + }, + { + "filename": "k8s/metrics/grafana/dashboards/00-vald-cluster-overview.yaml", + "ignoreWords": [ + "Misscheduled", + "Qxya", + "misscheduled" + ] + }, + { + "filename": "k8s/metrics/grafana/dashboards/02-vald-discoverer.yaml", + "ignoreWords": [ + "Jkemc", + "Versin" + ] + }, + { + "filename": "k8s/metrics/grafana/dashboards/05-vald-index-manager.yaml", + "ignoreWords": [ + "jowe" + ] + }, + { + "filename": "k8s/metrics/grafana/dashboards/10-vald-benchmark-operator.yaml", + "ignoreWords": [ + "Versin", + "fdewjfx", + "jkxz" + ] + }, + { + "filename": "k8s/metrics/grafana/dashboards/99-vald-agent-memory.yaml", + "ignoreWords": [ + "Memstats", + "buckhash", + "mcache", + "mspan", + "vmdata", + "vmexe", + "vmlck", + "vmlib", + "vmpin", + "vmpte", + "vmstk", + "vmswap" + ] + }, + { + "filename": "k8s/metrics/jaeger/jaeger.yaml", + "ignoreWords": [ + "jaegertracing" + ] + }, + { + "filename": "k8s/metrics/loki/loki.yaml", + "ignoreWords": [ + "boltdb", + "ingester", + "inmemory", + "kvstore", + "lifecycler" + ] + }, + { + "filename": "k8s/metrics/loki/promtail.yaml", + "ignoreWords": [ + "labelmap", + "promtail", + "varlibdockercontainers", + "varlog" + ] + }, + { + "filename": "k8s/metrics/prometheus/configmap.yaml", + "ignoreWords": [ + "cadvisor", + "labelmap" + ] + }, + { + "filename": "k8s/metrics/pyroscope/README.md", + 
"ignoreWords": [ + "mafests" + ] + }, + { + "filename": "k8s/metrics/pyroscope/base/configmap.yaml", + "ignoreWords": [ + "labelmap" + ] + }, + { + "filename": "k8s/metrics/pyroscope/base/daemonset.yaml", + "ignoreWords": [ + "ebpfspy" + ] + }, + { + "filename": "k8s/metrics/pyroscope/base/kustomization.yaml", + "ignoreWords": [ + "clusterrolebinding" + ] + }, + { + "filename": "k8s/metrics/tempo/jaeger-agent.yaml", + "ignoreWords": [ + "jaegertracing" + ] + }, + { + "filename": "k8s/metrics/tempo/tempo.yaml", + "ignoreWords": [ + "blocklist", + "ingester", + "inmemory", + "kvstore", + "lifecycler" + ] + }, + { + "filename": "k8s/operator/helm/clusterrole.yaml", + "ignoreWords": [ + "clusterrolebindings", + "clusterroles", + "customresourcedefinitions", + "horizontalpodautoscalers", + "networkpolicies", + "persistentvolumeclaims", + "poddisruptionbudgets", + "priorityclasses", + "serviceaccounts", + "statefulsets" + ] + }, + { + "filename": "k8s/operator/helm/crds/valdhelmoperatorrelease.yaml", + "ignoreWords": [ + "vhors" + ] + }, + { + "filename": "k8s/operator/helm/operator.yaml", + "ignoreWords": [ + "readyz" + ] + }, + { + "filename": "k8s/tools/benchmark/job/clusterrolebinding.yaml", + "ignoreWords": [ + "rolebinding" + ] + }, + { + "filename": "k8s/tools/benchmark/job/serviceaccount.yaml", + "ignoreWords": [ + "Versoin" + ] + }, + { + "filename": "k8s/tools/benchmark/operator/clusterrole.yaml", + "ignoreWords": [ + "deletecollection" + ] + }, + { + "filename": "k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml", + "ignoreWords": [ + "vbjs" + ] + }, + { + "filename": "k8s/tools/benchmark/operator/crds/valdbenchmarkoperatorrelease.yaml", + "ignoreWords": [ + "valdbenchmarkoperator", + "valdbenchmarkoperatorreleases", + "vbor", + "vbors" + ] + }, + { + "filename": "k8s/tools/benchmark/operator/crds/valdbenchmarkscenario.yaml", + "ignoreWords": [ + "vbss" + ] + }, + { + "filename": "pkg/agent/core/faiss/handler/grpc/search.go", + "ignoreWords": [ + "createing" + ] + }, + { + "filename": "pkg/agent/core/faiss/service/faiss.go", + "ignoreWords": [ + "Voronoi", + "ntotal", + "saveindex", + "subquantizers", + "tpath", + "tvald" + ] + }, + { + "filename": "pkg/agent/core/faiss/service/option.go", + "ignoreWords": [ + "bdbs", + "brnd" + ] + }, + { + "filename": "pkg/agent/core/faiss/usecase/agentd.go", + "ignoreWords": [ + "faissmetrics" + ] + }, + { + "filename": "pkg/agent/core/ngt/handler/grpc/flush.go", + "ignoreWords": [ + "Cnts", + "cnts" + ] + }, + { + "filename": "pkg/agent/core/ngt/handler/grpc/index_test.go", + "ignoreWords": [ + "exteneral" + ] + }, + { + "filename": "pkg/agent/core/ngt/handler/grpc/insert.go", + "ignoreWords": [ + "vmap" + ] + }, + { + "filename": "pkg/agent/core/ngt/handler/grpc/insert_test.go", + "ignoreWords": [ + "Testingcase", + "joind", + "nonexistid", + "pushinsert" + ] + }, + { + "filename": "pkg/agent/core/ngt/handler/grpc/object_test.go", + "ignoreWords": [ + "testfunc", + "tmock" + ] + }, + { + "filename": "pkg/agent/core/ngt/handler/grpc/update.go", + "ignoreWords": [ + "idis", + "vmap" + ] + }, + { + "filename": "pkg/agent/core/ngt/handler/grpc/update_test.go", + "ignoreWords": [ + "Testint" + ] + }, + { + "filename": "pkg/agent/core/ngt/service/ngt.go", + "ignoreWords": [ + "Nopvq", + "nkvs", + "nobic", + "nopvq", + "saveindex", + "toid", + "tvald" + ] + }, + { + "filename": "pkg/agent/core/ngt/service/ngt_test.go", + "ignoreWords": [ + "Nopvq", + "additionaldigits", + "kvald", + "metafile", + "nobic", + "nopvq", + "testfunc" + ] + }, + { + 
"filename": "pkg/agent/core/ngt/service/option.go", + "ignoreWords": [ + "bdbs", + "brnd" + ] + }, + { + "filename": "pkg/agent/core/ngt/usecase/agentd.go", + "ignoreWords": [ + "memmetrics", + "ngtmetrics" + ] + }, + { + "filename": "pkg/agent/internal/vqueue/queue.go", + "ignoreWords": [ + "uninserted" + ] + }, + { + "filename": "pkg/agent/internal/vqueue/stateful_test.go", + "ignoreWords": [ + "getvector" + ] + }, + { + "filename": "pkg/agent/sidecar/service/restorer/restorer.go", + "ignoreWords": [ + "Typeflag" + ] + }, + { + "filename": "pkg/discoverer/k8s/service/discover.go", + "ignoreWords": [ + "mnode", + "mpod", + "reconciation", + "svcsmap" + ] + }, + { + "filename": "pkg/discoverer/k8s/service/discover_test.go", + "ignoreWords": [ + "mnode", + "mpod" + ] + }, + { + "filename": "pkg/discoverer/k8s/usecase/discovered.go", + "ignoreWords": [ + "unbackupped" + ] + }, + { + "filename": "pkg/gateway/lb/handler/grpc/aggregation.go", + "ignoreWords": [ + "Aggr", + "Insuffcient", + "aggr", + "fdist", + "fmax", + "timeoutage" + ] + }, + { + "filename": "pkg/gateway/lb/handler/grpc/aggregation_test.go", + "ignoreWords": [ + "Aggr", + "aggr" + ] + }, + { + "filename": "pkg/gateway/lb/handler/grpc/handler.go", + "ignoreWords": [ + "Cnts", + "cnts", + "indegrees", + "outdegrees" + ] + }, + { + "filename": "pkg/gateway/lb/handler/grpc/handler_test.go", + "ignoreWords": [ + "Cnts", + "cnts" + ] + }, + { + "filename": "pkg/gateway/lb/handler/grpc/pairing_heap_test.go", + "ignoreWords": [ + "gids" + ] + }, + { + "filename": "pkg/gateway/lb/handler/grpc/search_benchmark_test.go", + "ignoreWords": [ + "Aggr", + "aggr", + "datas" + ] + }, + { + "filename": "pkg/gateway/mirror/handler/grpc/handler_test.go", + "ignoreWords": [ + "clientmock", + "cmap" + ] + }, + { + "filename": "pkg/gateway/mirror/service/discovery.go", + "ignoreWords": [ + "ctgt", + "ptgt" + ] + }, + { + "filename": "pkg/gateway/mirror/service/discovery_option.go", + "ignoreWords": [ + "datacenter" + ] + }, + { + "filename": "pkg/gateway/mirror/service/mirror_test.go", + "ignoreWords": [ + "grpcmock" + ] + }, + { + "filename": "pkg/gateway/mirror/usecase/vald.go", + "ignoreWords": [ + "mirrormetrics" + ] + }, + { + "filename": "pkg/index/job/creation/service/indexer_test.go", + "ignoreWords": [ + "clientmock", + "grpcmock" + ] + }, + { + "filename": "pkg/index/job/save/service/indexer_test.go", + "ignoreWords": [ + "clientmock", + "grpcmock" + ] + }, + { + "filename": "pkg/manager/index/usecase/indexer.go", + "ignoreWords": [ + "indexmetrics" + ] + }, + { + "filename": "pkg/tools/benchmark/job/config/config.go", + "ignoreWords": [ + "JOBNAME", + "JOBNAMESPACE" + ] + }, + { + "filename": "pkg/tools/benchmark/job/service/insert.go", + "ignoreWords": [ + "Prometeus" + ] + }, + { + "filename": "pkg/tools/benchmark/job/service/job.go", + "ignoreWords": [ + "Neighors", + "USERDEFINED", + "benchjob", + "userdefined" + ] + }, + { + "filename": "pkg/tools/benchmark/job/service/option.go", + "ignoreWords": [ + "Concurency", + "USERDEFINED", + "bjns", + "userdefined" + ] + }, + { + "filename": "pkg/tools/benchmark/job/service/option_test.go", + "ignoreWords": [ + "Concurency", + "bjns" + ] + }, + { + "filename": "pkg/tools/benchmark/job/usecase/benchmarkd.go", + "ignoreWords": [ + "Concurency", + "gcli", + "unbackupped", + "usecases", + "vcli" + ] + }, + { + "filename": "pkg/tools/benchmark/operator/service/operator.go", + "ignoreWords": [ + "Progation", + "benchjob", + "benchjobs", + "benchmarkjob", + "benchscenario", + "bjob", + "cbjl", + 
"cbsl", + "cjobs", + "rcticker", + "wating" + ] + }, + { + "filename": "pkg/tools/benchmark/operator/service/operator_test.go", + "ignoreWords": [ + "benchjobs", + "minsit", + "scneario" + ] + }, + { + "filename": "pkg/tools/benchmark/operator/usecase/benchmarkd.go", + "ignoreWords": [ + "benchmarkmetrics", + "unbackupped", + "usecases" + ] + }, + { + "filename": "pkg/tools/cli/loadtest/assets/dataset.go", + "ignoreWords": [ + "GROUNDTRUTH", + "groundtruth", + "kosarak", + "nytimes" + ] + }, + { + "filename": "pkg/tools/cli/loadtest/assets/hdf5_loader.go", + "ignoreWords": [ + "dset", + "npoints" + ] + }, + { + "filename": "pkg/tools/cli/loadtest/assets/hdf5_loader_test.go", + "ignoreWords": [ + "dset", + "npoints" + ] + }, + { + "filename": "pkg/tools/cli/loadtest/config/config.go", + "ignoreWords": [ + "streaminsert" + ] + }, + { + "filename": "rust/libs/ngt-rs/Cargo.toml", + "ignoreWords": [ + "miette" + ] + }, + { + "filename": "rust/libs/ngt-rs/build.rs", + "ignoreWords": [ + "BFLOAT", + "DNGT", + "dylib", + "fopenmp", + "gomp", + "miette", + "rustc" + ] + }, + { + "filename": "rust/libs/ngt-rs/src/input.cpp", + "ignoreWords": [ + "cpath", + "ngtresults", + "vquery" + ] + }, + { + "filename": "rust/libs/ngt-rs/src/lib.rs", + "ignoreWords": [ + "repr" + ] + }, + { + "filename": "rust/libs/observability/Cargo.toml", + "ignoreWords": [ + "reqwest", + "scopeguard", + "serde" + ] + }, + { + "filename": "rust/libs/observability/src/macros.rs", + "ignoreWords": [ + "Updown" + ] + }, + { + "filename": "rust/libs/proto/src/payload.v1.rs", + "ignoreWords": [ + "repr" + ] + }, + { + "filename": "tests/chaos/chart/README.md", + "ignoreWords": [ + "kbps", + "minburst", + "peakrate" + ] + }, + { + "filename": "tests/chaos/chart/templates/network/bandwidth.yaml", + "ignoreWords": [ + "minburst", + "peakrate" + ] + }, + { + "filename": "tests/chaos/chart/values.yaml", + "ignoreWords": [ + "kbps", + "minburst", + "peakrate" + ] + }, + { + "filename": "tests/e2e/crud/crud_test.go", + "ignoreWords": [ + "ECRUD" + ] + }, + { + "filename": "tests/e2e/kubernetes/client/client.go", + "ignoreWords": [ + "Clientset", + "clientcmd", + "clientset" + ] + }, + { + "filename": "tests/e2e/kubernetes/kubectl/kubectl.go", + "ignoreWords": [ + "rollouts", + "subcmds" + ] + }, + { + "filename": "tests/e2e/kubernetes/portforward/portforward.go", + "ignoreWords": [ + "genericclioptions", + "portforwarder", + "spdy", + "upgrader" + ] + }, + { + "filename": "tests/e2e/operation/stream.go", + "ignoreWords": [ + "evalidator", + "svalidator" + ] + } ] -} +} \ No newline at end of file diff --git a/.gitfiles b/.gitfiles index 64f792a39cd..bc3aad9d2b8 100644 --- a/.gitfiles +++ b/.gitfiles @@ -1,18 +1,3 @@ -# -# Copyright (C) 2019-2024 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# .all-contributorsrc .commit_template .cspell.json @@ -140,7 +125,6 @@ Makefile Makefile.d/actions.mk Makefile.d/bench.mk Makefile.d/build.mk -Makefile.d/client.mk Makefile.d/dependencies.mk Makefile.d/docker.mk Makefile.d/e2e.mk @@ -1904,6 +1888,11 @@ rust/libs/ngt-rs/src/input.h rust/libs/ngt-rs/src/lib.rs rust/libs/ngt/Cargo.toml rust/libs/ngt/src/lib.rs +rust/libs/observability/Cargo.toml +rust/libs/observability/src/config.rs +rust/libs/observability/src/lib.rs +rust/libs/observability/src/macros.rs +rust/libs/observability/src/observability.rs rust/libs/proto/Cargo.toml rust/libs/proto/src/core.v1.tonic.rs rust/libs/proto/src/discoverer.v1.tonic.rs @@ -1965,7 +1954,6 @@ versions/PROTOBUF_VERSION versions/REVIEWDOG_VERSION versions/RUST_VERSION versions/TELEPRESENCE_VERSION -versions/VALDCLI_VERSION versions/VALD_VERSION versions/YQ_VERSION versions/ZLIB_VERSION diff --git a/.github/helm/values/values-correction.yaml b/.github/helm/values/values-correction.yaml index 888931ca6fc..0632c3d2f70 100644 --- a/.github/helm/values/values-correction.yaml +++ b/.github/helm/values/values-correction.yaml @@ -16,7 +16,7 @@ defaults: logging: - level: info + level: debug networkPolicy: enabled: true gateway: diff --git a/.github/workflows/dockers-benchmark-job-image.yml b/.github/workflows/dockers-benchmark-job-image.yml index 2d59b4356c6..7b05be9b09f 100644 --- a/.github/workflows/dockers-benchmark-job-image.yml +++ b/.github/workflows/dockers-benchmark-job-image.yml @@ -27,7 +27,7 @@ on: paths: - ".github/actions/docker-build/action.yaml" - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-benchmak-job-image.yml" + - ".github/workflows/dockers-benchmark-job-image.yml" - "go.mod" - "go.sum" - "internal/**" @@ -44,7 +44,7 @@ on: paths: - ".github/actions/docker-build/action.yaml" - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-benchmak-job-image.yml" + - ".github/workflows/dockers-benchmark-job-image.yml" - "go.mod" - "go.sum" - "internal/**" diff --git a/.github/workflows/dockers-benchmark-operator-image.yaml b/.github/workflows/dockers-benchmark-operator-image.yaml index 2cc23d85b3c..8509ed2545b 100644 --- a/.github/workflows/dockers-benchmark-operator-image.yaml +++ b/.github/workflows/dockers-benchmark-operator-image.yaml @@ -27,7 +27,7 @@ on: paths: - ".github/actions/docker-build/action.yaml" - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-benchmak-operator-image.yml" + - ".github/workflows/dockers-benchmark-operator-image.yml" - "go.mod" - "go.sum" - "internal/**" @@ -44,7 +44,7 @@ on: paths: - ".github/actions/docker-build/action.yaml" - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-benchmak-operator-image.yml" + - ".github/workflows/dockers-benchmark-operator-image.yml" - "go.mod" - "go.sum" - "internal/**" diff --git a/Makefile b/Makefile index 4a5e001f710..2aba75c5e90 100644 --- a/Makefile +++ b/Makefile @@ -103,7 +103,6 @@ PROTOBUF_VERSION := $(eval PROTOBUF_VERSION := $(shell cat versions/PRO REVIEWDOG_VERSION := $(eval REVIEWDOG_VERSION := $(shell cat versions/REVIEWDOG_VERSION))$(REVIEWDOG_VERSION) RUST_VERSION := $(eval RUST_VERSION := $(shell cat versions/RUST_VERSION))$(RUST_VERSION) TELEPRESENCE_VERSION := $(eval TELEPRESENCE_VERSION := $(shell cat versions/TELEPRESENCE_VERSION))$(TELEPRESENCE_VERSION) -VALDCLI_VERSION := $(eval VALDCLI_VERSION := $(shell cat versions/VALDCLI_VERSION))$(VALDCLI_VERSION) YQ_VERSION := $(eval YQ_VERSION := $(shell cat versions/YQ_VERSION))$(YQ_VERSION) 
ZLIB_VERSION := $(eval ZLIB_VERSION := $(shell cat versions/ZLIB_VERSION))$(ZLIB_VERSION) @@ -455,7 +454,6 @@ init: \ tools/install: \ helm/install \ kind/install \ - valdcli/install \ telepresence/install \ textlint/install @@ -620,10 +618,6 @@ version/helm: version/yq: @echo $(YQ_VERSION) -.PHONY: version/valdcli -version/valdcli: - @echo $(VALDCLI_VERSION) - .PHONY: version/telepresence version/telepresence: @echo $(TELEPRESENCE_VERSION) @@ -737,14 +731,14 @@ files/textlint: \ ## run cspell for document docs/cspell:\ cspell/install - cspell-cli $(ROOTDIR)/docs/**/*.md --show-suggestions $(CSPELL_EXTRA_OPTIONS) + cspell $(ROOTDIR)/docs/**/*.md --show-suggestions $(CSPELL_EXTRA_OPTIONS) .PHONY: files/cspell ## run cspell for document files/cspell: \ files \ cspell/install - cspell-cli $(ROOTDIR)/.gitfiles --show-suggestions $(CSPELL_EXTRA_OPTIONS) + cspell $(ROOTDIR)/.gitfiles --show-suggestions $(CSPELL_EXTRA_OPTIONS) .PHONY: changelog/update ## update changelog @@ -766,7 +760,6 @@ changelog/next/print: include Makefile.d/actions.mk include Makefile.d/bench.mk include Makefile.d/build.mk -include Makefile.d/client.mk include Makefile.d/dependencies.mk include Makefile.d/docker.mk include Makefile.d/e2e.mk @@ -775,7 +768,7 @@ include Makefile.d/helm.mk include Makefile.d/k3d.mk include Makefile.d/k8s.mk include Makefile.d/kind.mk +include Makefile.d/minikube.mk include Makefile.d/proto.mk include Makefile.d/test.mk include Makefile.d/tools.mk -include Makefile.d/minikube.mk diff --git a/Makefile.d/client.mk b/Makefile.d/client.mk deleted file mode 100644 index 1c5c7aa9bf0..00000000000 --- a/Makefile.d/client.mk +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright (C) 2019-2024 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -.PHONY: valdcli/install -## install valdcli -valdcli/install: $(BINDIR)/valdcli - -ifeq ($(UNAME),Darwin) -$(BINDIR)/valdcli: - mkdir -p $(BINDIR) - curl -fsSLO https://github.com/rinx/vald-client-clj/releases/download/$(VALDCLI_VERSION)/valdcli-macos.zip - unzip valdcli-macos.zip - rm -f valdcli-macos.zip - mv valdcli $(BINDIR)/valdcli -else -$(BINDIR)/valdcli: - mkdir -p $(BINDIR) - curl -fsSLO https://github.com/rinx/vald-client-clj/releases/download/$(VALDCLI_VERSION)/valdcli-linux-static.zip - unzip valdcli-linux-static.zip - rm -f valdcli-linux-static.zip - mv valdcli $(BINDIR)/valdcli -endif - -.PHONY: valdcli/xpanes/insert -## insert randomized vectors using valdcli and xpanes -valdcli/xpanes/insert: - xpanes -c "valdcli rand-vecs -n $(NUMBER) -d $(DIMENSION) --gaussian --gaussian-mean $(MEAN) --gaussian-stddev $(STDDEV) --with-ids | valdcli -h $(HOST) -p $(PORT) stream-insert --elapsed-time" $$(seq 1 $(NUMPANES)) - -.PHONY: valdcli/xpanes/search -## search randomized vectors using valdcli and xpanes -valdcli/xpanes/search: - xpanes -c "valdcli rand-vecs -n $(NUMBER) -d $(DIMENSION) --gaussian --gaussian-mean $(MEAN) --gaussian-stddev $(STDDEV) | valdcli -h $(HOST) -p $(PORT) stream-search --elapsed-time" $$(seq 1 $(NUMPANES)) diff --git a/Makefile.d/dependencies.mk b/Makefile.d/dependencies.mk index 928ed8c9d26..596e9ec1b29 100644 --- a/Makefile.d/dependencies.mk +++ b/Makefile.d/dependencies.mk @@ -39,7 +39,6 @@ update/libs: \ update/rust \ update/telepresence \ update/vald \ - update/valdcli \ update/yq \ update/zlib @@ -227,11 +226,6 @@ update/hdf5: update/vald: curl -fsSL https://api.github.com/repos/$(REPO)/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' > $(ROOTDIR)/versions/VALD_VERSION -.PHONY: update/valdcli -## update vald client library made by clojure self version -update/valdcli: - curl -fsSL https://api.github.com/repos/$(REPO)-client-clj/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' > $(ROOTDIR)/versions/VALDCLI_VERSION - .PHONY: update/template ## update PULL_REQUEST_TEMPLATE and ISSUE_TEMPLATE update/template: diff --git a/Makefile.d/tools.mk b/Makefile.d/tools.mk index 3502f2e8792..c9968e5d75f 100644 --- a/Makefile.d/tools.mk +++ b/Makefile.d/tools.mk @@ -89,7 +89,34 @@ textlint/ci/install: cspell/install: $(NPM_GLOBAL_PREFIX)/bin/cspell $(NPM_GLOBAL_PREFIX)/bin/cspell: - npm install -g cspell@latest + npm install -g cspell@latest \ + @cspell/dict-cpp \ + @cspell/dict-docker \ + @cspell/dict-en_us \ + @cspell/dict-fullstack \ + @cspell/dict-git \ + @cspell/dict-golang \ + @cspell/dict-k8s \ + @cspell/dict-makefile \ + @cspell/dict-markdown \ + @cspell/dict-npm \ + @cspell/dict-public-licenses \ + @cspell/dict-rust \ + @cspell/dict-shell + cspell link add @cspell/dict-cpp + cspell link add @cspell/dict-docker + cspell link add @cspell/dict-en_us + cspell link add @cspell/dict-fullstack + cspell link add @cspell/dict-git + cspell link add @cspell/dict-golang + cspell link add @cspell/dict-k8s + cspell link add @cspell/dict-makefile + cspell link add @cspell/dict-markdown + cspell link add @cspell/dict-npm + cspell link add @cspell/dict-public-licenses + cspell link add @cspell/dict-rust + cspell link add @cspell/dict-shell + .PHONY: buf/install buf/install: $(BINDIR)/buf diff --git a/README.md b/README.md index f383078d651..a03458bf5bb 100755 --- a/README.md +++ b/README.md @@ -264,7 +264,9 @@ make init ## Contributors + [![All Contributors](https://img.shields.io/badge/all_contributors-18-orange.svg?style=flat-square)](#contributors) + Thanks 
goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): diff --git a/apis/docs/v1/docs.md b/apis/docs/v1/docs.md index 22893448488..08bf1d81d61 100644 --- a/apis/docs/v1/docs.md +++ b/apis/docs/v1/docs.md @@ -1664,11 +1664,12 @@ Search service provides ways to search indexed vectors. Update service provides ways to update indexed vectors. -| Method Name | Request Type | Response Type | Description | -| ------------ | ------------------------------------------------------------------ | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| Update | [.payload.v1.Update.Request](#payload-v1-Update-Request) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update an indexed vector. | -| StreamUpdate | [.payload.v1.Update.Request](#payload-v1-Update-Request) stream | [.payload.v1.Object.StreamLocation](#payload-v1-Object-StreamLocation) stream | A method to update multiple indexed vectors by bidirectional streaming. | -| MultiUpdate | [.payload.v1.Update.MultiRequest](#payload-v1-Update-MultiRequest) | [.payload.v1.Object.Locations](#payload-v1-Object-Locations) | A method to update multiple indexed vectors in a single request. | +| Method Name | Request Type | Response Type | Description | +| --------------- | ------------------------------------------------------------------ | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| Update | [.payload.v1.Update.Request](#payload-v1-Update-Request) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update an indexed vector. | +| StreamUpdate | [.payload.v1.Update.Request](#payload-v1-Update-Request) stream | [.payload.v1.Object.StreamLocation](#payload-v1-Object-StreamLocation) stream | A method to update multiple indexed vectors by bidirectional streaming. | +| MultiUpdate | [.payload.v1.Update.MultiRequest](#payload-v1-Update-MultiRequest) | [.payload.v1.Object.Locations](#payload-v1-Object-Locations) | A method to update multiple indexed vectors in a single request. | +| UpdateTimestamp | [.payload.v1.Object.Timestamp](#payload-v1-Object-Timestamp) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update timestamp an indexed vector. 
| diff --git a/apis/grpc/v1/vald/update.pb.go b/apis/grpc/v1/vald/update.pb.go index ad05fbe50c7..f98b9d1b26b 100644 --- a/apis/grpc/v1/vald/update.pb.go +++ b/apis/grpc/v1/vald/update.pb.go @@ -46,7 +46,7 @@ var file_v1_vald_update_proto_rawDesc = []byte{ 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x9f, 0x02, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, + 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x81, 0x03, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, @@ -64,32 +64,41 @@ var file_v1_vald_update_proto_rawDesc = []byte{ 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x3a, 0x01, 0x2a, 0x22, 0x10, 0x2f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x42, 0x53, 0x0a, 0x1a, 0x6f, 0x72, 0x67, - 0x2e, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x31, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x64, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x12, 0x60, 0x0a, 0x0f, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1a, 0x1b, 0x2e, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x12, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0c, 0x3a, + 0x01, 0x2a, 0x22, 0x07, 0x2f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x53, 0x0a, 0x1a, 0x6f, + 0x72, 0x67, 0x2e, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x64, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x64, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_v1_vald_update_proto_goTypes = []any{ (*payload.Update_Request)(nil), // 0: payload.v1.Update.Request (*payload.Update_MultiRequest)(nil), // 1: payload.v1.Update.MultiRequest - (*payload.Object_Location)(nil), // 2: payload.v1.Object.Location - (*payload.Object_StreamLocation)(nil), // 3: 
payload.v1.Object.StreamLocation - (*payload.Object_Locations)(nil), // 4: payload.v1.Object.Locations + (*payload.Object_Timestamp)(nil), // 2: payload.v1.Object.Timestamp + (*payload.Object_Location)(nil), // 3: payload.v1.Object.Location + (*payload.Object_StreamLocation)(nil), // 4: payload.v1.Object.StreamLocation + (*payload.Object_Locations)(nil), // 5: payload.v1.Object.Locations } var file_v1_vald_update_proto_depIdxs = []int32{ 0, // 0: vald.v1.Update.Update:input_type -> payload.v1.Update.Request 0, // 1: vald.v1.Update.StreamUpdate:input_type -> payload.v1.Update.Request 1, // 2: vald.v1.Update.MultiUpdate:input_type -> payload.v1.Update.MultiRequest - 2, // 3: vald.v1.Update.Update:output_type -> payload.v1.Object.Location - 3, // 4: vald.v1.Update.StreamUpdate:output_type -> payload.v1.Object.StreamLocation - 4, // 5: vald.v1.Update.MultiUpdate:output_type -> payload.v1.Object.Locations - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type + 2, // 3: vald.v1.Update.UpdateTimestamp:input_type -> payload.v1.Object.Timestamp + 3, // 4: vald.v1.Update.Update:output_type -> payload.v1.Object.Location + 4, // 5: vald.v1.Update.StreamUpdate:output_type -> payload.v1.Object.StreamLocation + 5, // 6: vald.v1.Update.MultiUpdate:output_type -> payload.v1.Object.Locations + 3, // 7: vald.v1.Update.UpdateTimestamp:output_type -> payload.v1.Object.Location + 4, // [4:8] is the sub-list for method output_type + 0, // [0:4] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/apis/grpc/v1/vald/update_vtproto.pb.go b/apis/grpc/v1/vald/update_vtproto.pb.go index 60f92fd0a80..6fc00c758c8 100644 --- a/apis/grpc/v1/vald/update_vtproto.pb.go +++ b/apis/grpc/v1/vald/update_vtproto.pb.go @@ -48,6 +48,8 @@ type UpdateClient interface { StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Update_StreamUpdateClient, error) // A method to update multiple indexed vectors in a single request. MultiUpdate(ctx context.Context, in *payload.Update_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) + // A method to update timestamp an indexed vector. + UpdateTimestamp(ctx context.Context, in *payload.Object_Timestamp, opts ...grpc.CallOption) (*payload.Object_Location, error) } type updateClient struct { @@ -113,6 +115,17 @@ func (c *updateClient) MultiUpdate( return out, nil } +func (c *updateClient) UpdateTimestamp( + ctx context.Context, in *payload.Object_Timestamp, opts ...grpc.CallOption, +) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Update/UpdateTimestamp", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // UpdateServer is the server API for Update service. // All implementations must embed UnimplementedUpdateServer // for forward compatibility @@ -123,6 +136,8 @@ type UpdateServer interface { StreamUpdate(Update_StreamUpdateServer) error // A method to update multiple indexed vectors in a single request. MultiUpdate(context.Context, *payload.Update_MultiRequest) (*payload.Object_Locations, error) + // A method to update timestamp an indexed vector. 
+ UpdateTimestamp(context.Context, *payload.Object_Timestamp) (*payload.Object_Location, error) mustEmbedUnimplementedUpdateServer() } @@ -144,6 +159,12 @@ func (UnimplementedUpdateServer) MultiUpdate( ) (*payload.Object_Locations, error) { return nil, status.Errorf(codes.Unimplemented, "method MultiUpdate not implemented") } + +func (UnimplementedUpdateServer) UpdateTimestamp( + context.Context, *payload.Object_Timestamp, +) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTimestamp not implemented") +} func (UnimplementedUpdateServer) mustEmbedUnimplementedUpdateServer() {} // UnsafeUpdateServer may be embedded to opt out of forward compatibility for this service. @@ -223,6 +244,26 @@ func _Update_MultiUpdate_Handler( return interceptor(ctx, in, info, handler) } +func _Update_UpdateTimestamp_Handler( + srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor, +) (any, error) { + in := new(payload.Object_Timestamp) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpdateServer).UpdateTimestamp(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Update/UpdateTimestamp", + } + handler := func(ctx context.Context, req any) (any, error) { + return srv.(UpdateServer).UpdateTimestamp(ctx, req.(*payload.Object_Timestamp)) + } + return interceptor(ctx, in, info, handler) +} + // Update_ServiceDesc is the grpc.ServiceDesc for Update service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -238,6 +279,10 @@ var Update_ServiceDesc = grpc.ServiceDesc{ MethodName: "MultiUpdate", Handler: _Update_MultiUpdate_Handler, }, + { + MethodName: "UpdateTimestamp", + Handler: _Update_UpdateTimestamp_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/apis/grpc/v1/vald/vald.go b/apis/grpc/v1/vald/vald.go index a67e7c97eff..c1255fac429 100644 --- a/apis/grpc/v1/vald/vald.go +++ b/apis/grpc/v1/vald/vald.go @@ -95,6 +95,7 @@ const ( UpdateObjectRPCName = "UpdateObject" StreamUpdateObjectRPCName = "StreamUpdateObject" MultiUpdateObjectRPCName = "MultiUpdateObject" + UpdateTimestampRPCName = "UpdateTimestamp" UpsertRPCName = "Upsert" StreamUpsertRPCName = "StreamUpsert" diff --git a/apis/proto/v1/vald/update.proto b/apis/proto/v1/vald/update.proto index 4cab1cabf09..ccd715efd7c 100644 --- a/apis/proto/v1/vald/update.proto +++ b/apis/proto/v1/vald/update.proto @@ -46,4 +46,12 @@ service Update { body: "*" }; } + + // A method to update timestamp an indexed vector. 
+ rpc UpdateTimestamp(payload.v1.Object.Timestamp) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post: "/update" + body: "*" + }; + } } diff --git a/apis/swagger/v1/vald/update.swagger.json b/apis/swagger/v1/vald/update.swagger.json index d295febc194..85c729d7020 100644 --- a/apis/swagger/v1/vald/update.swagger.json +++ b/apis/swagger/v1/vald/update.swagger.json @@ -14,8 +14,8 @@ "paths": { "/update": { "post": { - "summary": "A method to update an indexed vector.", - "operationId": "Update_Update", + "summary": "A method to update timestamp an indexed vector.", + "operationId": "Update_UpdateTimestamp", "responses": { "200": { "description": "A successful response.", @@ -33,11 +33,11 @@ "parameters": [ { "name": "body", - "description": "Represent the update request.", + "description": "Represent a vector meta data.", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/v1UpdateRequest" + "$ref": "#/definitions/v1ObjectTimestamp" } } ], @@ -212,6 +212,21 @@ }, "description": "Represent the vector location." }, + "v1ObjectTimestamp": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The vector ID." + }, + "timestamp": { + "type": "string", + "format": "int64", + "description": "timestamp represents when this vector inserted." + } + }, + "description": "Represent a vector meta data." + }, "v1UpdateConfig": { "type": "object", "properties": { diff --git a/charts/vald-benchmark-operator/README.md b/charts/vald-benchmark-operator/README.md index a5117ebca42..bc3dda2a409 100644 --- a/charts/vald-benchmark-operator/README.md +++ b/charts/vald-benchmark-operator/README.md @@ -230,7 +230,7 @@ Run the following command to install the chart, | server_config.servers.grpc.server.probe_wait_time | string | `"3s"` | | | server_config.servers.grpc.server.restart | bool | `true` | | | server_config.servers.grpc.server.socket_path | string | `""` | | -| server_config.servers.grpc.serviecPort | int | `8081` | | +| server_config.servers.grpc.servicePort | int | `8081` | | | server_config.servers.rest.enabled | bool | `false` | | | server_config.tls.ca | string | `"/path/to/ca"` | | | server_config.tls.cert | string | `"/path/to/cert"` | | diff --git a/charts/vald-benchmark-operator/schemas/job-values.yaml b/charts/vald-benchmark-operator/schemas/job-values.yaml index a619e55d4d6..1e166a1db0b 100644 --- a/charts/vald-benchmark-operator/schemas/job-values.yaml +++ b/charts/vald-benchmark-operator/schemas/job-values.yaml @@ -800,7 +800,7 @@ server_config: reuse_port: true # server_config.healths.readiness.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality reuse_addr: true - # server_config.healths.readiness.server.socket_option.tcp_fast_oepn -- server listen socket option for tcp_fast_open functionality + # server_config.healths.readiness.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality tcp_fast_open: true # server_config.healths.readiness.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality tcp_no_delay: true diff --git a/charts/vald-benchmark-operator/templates/deployment.yaml b/charts/vald-benchmark-operator/templates/deployment.yaml index 14df9b4c2ca..e8d36b3a418 100644 --- a/charts/vald-benchmark-operator/templates/deployment.yaml +++ b/charts/vald-benchmark-operator/templates/deployment.yaml @@ -47,7 +47,7 @@ spec: {{- if .Values.podAnnotations }} {{- toYaml . 
| nindent 8 }} {{- end }} - {{- if .Values.server_config.metrics.pprof.enabeld }} + {{- if .Values.server_config.metrics.pprof.enabled }} pyroscope.io/scrape: "true" pyroscope.io/application-name: {{ .Values.name }} pyroscope.io/profile-cpu-enabled: "true" diff --git a/charts/vald-benchmark-operator/values.yaml b/charts/vald-benchmark-operator/values.yaml index 07120439025..c044d2656d1 100644 --- a/charts/vald-benchmark-operator/values.yaml +++ b/charts/vald-benchmark-operator/values.yaml @@ -384,7 +384,7 @@ server_config: # @schema {"name": "server_config.servers.grpc.port", "type": "integer"} port: 8081 # @schema {"name": "server_config.servers.grpc.servicePort", "type": "integer"} - serviecPort: 8081 + servicePort: 8081 # @schema {"name": "server_config.servers.grpc.server", "type": "object"} server: # @schema {"name": "server_config.servers.grpc.server.mode", "type": "string"} diff --git a/charts/vald/values.yaml b/charts/vald/values.yaml index acbc75da52b..27960e5ba71 100644 --- a/charts/vald/values.yaml +++ b/charts/vald/values.yaml @@ -530,7 +530,7 @@ defaults: reuse_port: true # defaults.server_config.healths.readiness.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality reuse_addr: true - # defaults.server_config.healths.readiness.server.socket_option.tcp_fast_oepn -- server listen socket option for tcp_fast_open functionality + # defaults.server_config.healths.readiness.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality tcp_fast_open: true # defaults.server_config.healths.readiness.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality tcp_no_delay: true diff --git a/dockers/agent/core/agent/Dockerfile b/dockers/agent/core/agent/Dockerfile index f3195dde427..d901bd0f7aa 100644 --- a/dockers/agent/core/agent/Dockerfile +++ b/dockers/agent/core/agent/Dockerfile @@ -36,11 +36,11 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=agent/core/agent ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root -ENV CARGO_HOME=${RUST_HOME}/cargo ENV RUSTUP_HOME=${RUST_HOME}/rustup +ENV CARGO_HOME=${RUST_HOME}/cargo ENV PATH=${CARGO_HOME}/bin:${RUSTUP_HOME}/bin:/usr/local/bin:${PATH} WORKDIR ${HOME}/rust/src/github.com/${ORG}/${REPO} SHELL ["/bin/bash", "-o", "pipefail", "-c"] @@ -91,4 +91,4 @@ LABEL maintainer="vdaas.org vald team " COPY --from=builder /usr/bin/agent /usr/bin/agent # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/agent"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/agent"] diff --git a/dockers/agent/core/faiss/Dockerfile b/dockers/agent/core/faiss/Dockerfile index 463e13494bd..3ee7076aad1 100644 --- a/dockers/agent/core/faiss/Dockerfile +++ b/dockers/agent/core/faiss/Dockerfile @@ -93,4 +93,4 @@ COPY --from=builder /usr/bin/faiss /usr/bin/faiss COPY cmd/agent/core/faiss/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/faiss"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/faiss"] diff --git a/dockers/agent/core/ngt/Dockerfile b/dockers/agent/core/ngt/Dockerfile index cf0bbcdce2d..55b55d7e439 100644 --- a/dockers/agent/core/ngt/Dockerfile +++ b/dockers/agent/core/ngt/Dockerfile @@ -92,4 +92,4 @@ COPY --from=builder /usr/bin/ngt /usr/bin/ngt COPY cmd/agent/core/ngt/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/ngt"] \ No newline at end of file +ENTRYPOINT 
["/usr/bin/ngt"] diff --git a/dockers/agent/sidecar/Dockerfile b/dockers/agent/sidecar/Dockerfile index 2b06e565fd9..0f9c0e07604 100644 --- a/dockers/agent/sidecar/Dockerfile +++ b/dockers/agent/sidecar/Dockerfile @@ -82,4 +82,4 @@ LABEL maintainer="vdaas.org vald team " COPY --from=builder /usr/bin/sidecar /usr/bin/sidecar # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/sidecar"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/sidecar"] diff --git a/dockers/binfmt/Dockerfile b/dockers/binfmt/Dockerfile index 47284d17e2c..f2d73909bdc 100644 --- a/dockers/binfmt/Dockerfile +++ b/dockers/binfmt/Dockerfile @@ -16,4 +16,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM tonistiigi/binfmt:master AS builder \ No newline at end of file +FROM tonistiigi/binfmt:master AS builder diff --git a/dockers/buildbase/Dockerfile b/dockers/buildbase/Dockerfile index 6457b014578..301a31e0103 100644 --- a/dockers/buildbase/Dockerfile +++ b/dockers/buildbase/Dockerfile @@ -16,4 +16,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM ubuntu:devel AS builder \ No newline at end of file +FROM ubuntu:devel AS builder diff --git a/dockers/buildkit/Dockerfile b/dockers/buildkit/Dockerfile index 9dd722ea255..99c71c61e23 100644 --- a/dockers/buildkit/Dockerfile +++ b/dockers/buildkit/Dockerfile @@ -16,4 +16,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM moby/buildkit:master AS builder \ No newline at end of file +FROM moby/buildkit:master AS builder diff --git a/dockers/ci/base/Dockerfile b/dockers/ci/base/Dockerfile index 09fccb1e35e..270b2f18763 100644 --- a/dockers/ci/base/Dockerfile +++ b/dockers/ci/base/Dockerfile @@ -41,7 +41,7 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=ci/base ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root ENV RUSTUP_HOME=${RUST_HOME}/rustup @@ -113,7 +113,6 @@ RUN --mount=type=bind,target=.,rw \ && make kubelinter/install \ && make reviewdog/install \ && make tparse/install \ - && make valdcli/install \ && make yq/install \ && make minikube/install \ && make stern/install \ @@ -123,4 +122,4 @@ RUN --mount=type=bind,target=.,rw \ && rm -rf ${GOPATH}/src/github.com/${ORG}/${REPO}/* # skipcq: DOK-DL3002 USER root:root -ENTRYPOINT ["/bin/bash"] \ No newline at end of file +ENTRYPOINT ["/bin/bash"] diff --git a/dockers/dev/Dockerfile b/dockers/dev/Dockerfile index 502f386e973..061c48169fc 100644 --- a/dockers/dev/Dockerfile +++ b/dockers/dev/Dockerfile @@ -41,11 +41,11 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=dev ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root -ENV CARGO_HOME=${RUST_HOME}/cargo ENV RUSTUP_HOME=${RUST_HOME}/rustup +ENV CARGO_HOME=${RUST_HOME}/cargo ENV PATH=${CARGO_HOME}/bin:${GOPATH}/bin:${GOROOT}/bin:${RUSTUP_HOME}/bin:/usr/local/bin:${PATH} WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO} SHELL ["/bin/bash", "-o", "pipefail", "-c"] @@ -133,7 +133,6 @@ RUN --mount=type=bind,target=.,rw \ && make kubelinter/install \ && make reviewdog/install \ && make tparse/install \ - && make valdcli/install \ && make yq/install \ && make minikube/install \ && make stern/install \ @@ -142,4 +141,4 @@ RUN --mount=type=bind,target=.,rw \ && make faiss/install \ && rm -rf ${GOPATH}/src/github.com/${ORG}/${REPO}/* # skipcq: 
DOK-DL3002 -USER root:root \ No newline at end of file +USER root:root diff --git a/dockers/discoverer/k8s/Dockerfile b/dockers/discoverer/k8s/Dockerfile index 19e5953ee21..42d7b476f7f 100644 --- a/dockers/discoverer/k8s/Dockerfile +++ b/dockers/discoverer/k8s/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/discoverer /usr/bin/discoverer COPY cmd/discoverer/k8s/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/discoverer"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/discoverer"] diff --git a/dockers/gateway/filter/Dockerfile b/dockers/gateway/filter/Dockerfile index b523b5e5b87..bd6b3dadd3b 100644 --- a/dockers/gateway/filter/Dockerfile +++ b/dockers/gateway/filter/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/filter /usr/bin/filter COPY cmd/gateway/filter/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/filter"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/filter"] diff --git a/dockers/gateway/lb/Dockerfile b/dockers/gateway/lb/Dockerfile index 2f8c91768b0..f6c3f0b5c55 100644 --- a/dockers/gateway/lb/Dockerfile +++ b/dockers/gateway/lb/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/lb /usr/bin/lb COPY cmd/gateway/lb/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/lb"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/lb"] diff --git a/dockers/gateway/mirror/Dockerfile b/dockers/gateway/mirror/Dockerfile index 9b97231c740..f3effce23fe 100644 --- a/dockers/gateway/mirror/Dockerfile +++ b/dockers/gateway/mirror/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/mirror /usr/bin/mirror COPY cmd/gateway/mirror/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/mirror"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/mirror"] diff --git a/dockers/index/job/correction/Dockerfile b/dockers/index/job/correction/Dockerfile index 01e3818c56b..06b7642c1a5 100644 --- a/dockers/index/job/correction/Dockerfile +++ b/dockers/index/job/correction/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/index-correction /usr/bin/index-correction COPY cmd/index/job/correction/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-correction"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-correction"] diff --git a/dockers/index/job/creation/Dockerfile b/dockers/index/job/creation/Dockerfile index d656b3ad222..6ea3eda4b6d 100644 --- a/dockers/index/job/creation/Dockerfile +++ b/dockers/index/job/creation/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/index-creation /usr/bin/index-creation COPY cmd/index/job/creation/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-creation"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-creation"] diff --git a/dockers/index/job/readreplica/rotate/Dockerfile b/dockers/index/job/readreplica/rotate/Dockerfile index bdb0ec76646..432085dbb42 100644 --- a/dockers/index/job/readreplica/rotate/Dockerfile +++ b/dockers/index/job/readreplica/rotate/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/readreplica-rotate /usr/bin/readreplica-rotate COPY cmd/index/job/readreplica/rotate/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/readreplica-rotate"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/readreplica-rotate"] diff 
--git a/dockers/index/job/save/Dockerfile b/dockers/index/job/save/Dockerfile index fdd674abd49..1b0797a81d0 100644 --- a/dockers/index/job/save/Dockerfile +++ b/dockers/index/job/save/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/index-save /usr/bin/index-save COPY cmd/index/job/save/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-save"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-save"] diff --git a/dockers/index/operator/Dockerfile b/dockers/index/operator/Dockerfile index 23e9aae5140..c2f652283f1 100644 --- a/dockers/index/operator/Dockerfile +++ b/dockers/index/operator/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/index-operator /usr/bin/index-operator COPY cmd/index/operator/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-operator"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-operator"] diff --git a/dockers/manager/index/Dockerfile b/dockers/manager/index/Dockerfile index edecb98a6c7..8eb554a3905 100644 --- a/dockers/manager/index/Dockerfile +++ b/dockers/manager/index/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/index /usr/bin/index COPY cmd/manager/index/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index"] diff --git a/dockers/operator/helm/Dockerfile b/dockers/operator/helm/Dockerfile index db240947906..d5edc4f6e9f 100644 --- a/dockers/operator/helm/Dockerfile +++ b/dockers/operator/helm/Dockerfile @@ -104,4 +104,4 @@ COPY --from=builder /opt/helm/charts/vald /opt/helm/charts/vald COPY --from=builder /opt/helm/charts/vald-helm-operator /opt/helm/charts/vald-helm-operator # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/helm-operator", "run", "--watches-file=/opt/helm/watches.yaml"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/helm-operator", "run", "--watches-file=/opt/helm/watches.yaml"] diff --git a/dockers/tools/benchmark/job/Dockerfile b/dockers/tools/benchmark/job/Dockerfile index 65da31d4680..d77a1181fd1 100644 --- a/dockers/tools/benchmark/job/Dockerfile +++ b/dockers/tools/benchmark/job/Dockerfile @@ -91,4 +91,4 @@ COPY --from=builder /usr/bin/job /usr/bin/job COPY cmd/tools/benchmark/job/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/job"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/job"] diff --git a/dockers/tools/benchmark/operator/Dockerfile b/dockers/tools/benchmark/operator/Dockerfile index 283773b11ef..efe0a34e9fd 100644 --- a/dockers/tools/benchmark/operator/Dockerfile +++ b/dockers/tools/benchmark/operator/Dockerfile @@ -83,4 +83,4 @@ COPY --from=builder /usr/bin/operator /usr/bin/operator COPY cmd/tools/benchmark/operator/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/operator"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/operator"] diff --git a/dockers/tools/cli/loadtest/Dockerfile b/dockers/tools/cli/loadtest/Dockerfile index ffbea81b056..d061b486f98 100644 --- a/dockers/tools/cli/loadtest/Dockerfile +++ b/dockers/tools/cli/loadtest/Dockerfile @@ -91,4 +91,4 @@ COPY --from=builder /usr/bin/loadtest /usr/bin/loadtest COPY cmd/tools/cli/loadtest/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/loadtest"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/loadtest"] diff --git 
a/docs/contributing/unit-test-guideline.md b/docs/contributing/unit-test-guideline.md index 833e65addfc..41341bd704c 100644 --- a/docs/contributing/unit-test-guideline.md +++ b/docs/contributing/unit-test-guideline.md @@ -128,7 +128,7 @@ You have to create unit tests for error patterns as the same as success patterns #### Advanced -##### Robust boudary test +##### Robust boundary test The previous section is about the basic test cases. The (robust) boundary test should be applied to cover more test coverage. diff --git a/docs/user-guides/observability-configuration.md b/docs/user-guides/observability-configuration.md index 677781f191b..db965a94178 100644 --- a/docs/user-guides/observability-configuration.md +++ b/docs/user-guides/observability-configuration.md @@ -176,7 +176,7 @@ defaults: #### Specify the Telemetry attribute -You can add the component information to the attribute of telemetry data by editing `defaults.observability.otlp.attirbute`. +You can add the component information to the attribute of telemetry data by editing `defaults.observability.otlp.attribute`. E.g., when setting `vald-agent-ngt-0` as `agent.observability.otlp.attribute.pod_name`, `target_pod: vald-agent-ngt-0` will be added to the attribute. These attributes are set auto by the environment values, so Vald recommends using default values unless there is a specific reason. diff --git a/example/client/go.mod b/example/client/go.mod index f64e6546d4f..cc7f72f0f2a 100644 --- a/example/client/go.mod +++ b/example/client/go.mod @@ -11,9 +11,9 @@ replace ( golang.org/x/crypto => golang.org/x/crypto v0.26.0 golang.org/x/net => golang.org/x/net v0.28.0 golang.org/x/text => golang.org/x/text v0.17.0 - google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240805194559-2c9e96a0b5d4 - google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240805194559-2c9e96a0b5d4 - google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240805194559-2c9e96a0b5d4 + google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 + google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988 + google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 google.golang.org/grpc => google.golang.org/grpc v1.65.0 google.golang.org/protobuf => google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.4.0 diff --git a/example/client/go.sum b/example/client/go.sum index d91b037bda2..db50f99b1ee 100644 --- a/example/client/go.sum +++ b/example/client/go.sum @@ -30,10 +30,10 @@ golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946 h1:vJpL69PeUullhJyKtTjHjENEmZU3BkO4e+fod7nKzgM= gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946/go.mod h1:BQUWDHIAygjdt1HnUPQ0eWqLN2n5FwJycrpYUVUOx2I= -google.golang.org/genproto/googleapis/api v0.0.0-20240805194559-2c9e96a0b5d4 h1:ABEBT/sZ7We8zd7A5f3KO6zMQe+s3901H7l8Whhijt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240805194559-2c9e96a0b5d4/go.mod h1:4+X6GvPs+25wZKbQq9qyAXrwIRExv7w0Ea6MgZLZiDM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240805194559-2c9e96a0b5d4 h1:OsSGQeIIsyOEOimVxLEIL4rwGcnrjOydQaiA2bOnZUM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240805194559-2c9e96a0b5d4/go.mod 
h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988 h1:+/tmTy5zAieooKIXfzDm9KiA3Bv6JBwriRN9LY+yayk= +google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988/go.mod h1:4+X6GvPs+25wZKbQq9qyAXrwIRExv7w0Ea6MgZLZiDM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 h1:V71AcdLZr2p8dC9dbOIMCpqi4EmRl8wUwnJzXXLmbmc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= diff --git a/go.mod b/go.mod index 333ed08f291..c9e699b97dc 100644 --- a/go.mod +++ b/go.mod @@ -5,17 +5,17 @@ go 1.22.6 replace ( cloud.google.com/go => cloud.google.com/go v0.115.0 cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.62.0 - cloud.google.com/go/compute => cloud.google.com/go/compute v1.27.4 + cloud.google.com/go/compute => cloud.google.com/go/compute v1.27.5 cloud.google.com/go/datastore => cloud.google.com/go/datastore v1.17.1 cloud.google.com/go/firestore => cloud.google.com/go/firestore v1.16.0 - cloud.google.com/go/iam => cloud.google.com/go/iam v1.1.12 - cloud.google.com/go/kms => cloud.google.com/go/kms v1.18.4 - cloud.google.com/go/monitoring => cloud.google.com/go/monitoring v1.20.3 + cloud.google.com/go/iam => cloud.google.com/go/iam v1.1.13 + cloud.google.com/go/kms => cloud.google.com/go/kms v1.18.5 + cloud.google.com/go/monitoring => cloud.google.com/go/monitoring v1.20.4 cloud.google.com/go/pubsub => cloud.google.com/go/pubsub v1.41.0 - cloud.google.com/go/secretmanager => cloud.google.com/go/secretmanager v1.13.5 + cloud.google.com/go/secretmanager => cloud.google.com/go/secretmanager v1.13.6 cloud.google.com/go/storage => cloud.google.com/go/storage v1.43.0 - cloud.google.com/go/trace => cloud.google.com/go/trace v1.10.11 - code.cloudfoundry.org/bytefmt => code.cloudfoundry.org/bytefmt v0.0.0-20240806182212-6cf545ebdd6b + cloud.google.com/go/trace => cloud.google.com/go/trace v1.10.12 + code.cloudfoundry.org/bytefmt => code.cloudfoundry.org/bytefmt v0.0.0-20240808182453-a379845013d9 contrib.go.opencensus.io/exporter/aws => contrib.go.opencensus.io/exporter/aws v0.0.0-20230502192102-15967c811cec contrib.go.opencensus.io/exporter/prometheus => contrib.go.opencensus.io/exporter/prometheus v0.4.2 contrib.go.opencensus.io/integrations/ocsql => contrib.go.opencensus.io/integrations/ocsql v0.1.7 @@ -63,7 +63,7 @@ replace ( github.com/aws/aws-sdk-go-v2/service/secretsmanager => github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4 github.com/aws/aws-sdk-go-v2/service/sns => github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 github.com/aws/aws-sdk-go-v2/service/sqs => github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 - github.com/aws/aws-sdk-go-v2/service/ssm => github.com/aws/aws-sdk-go-v2/service/ssm v1.52.3 + github.com/aws/aws-sdk-go-v2/service/ssm => github.com/aws/aws-sdk-go-v2/service/ssm v1.52.4 github.com/aws/aws-sdk-go-v2/service/sso => github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 github.com/aws/aws-sdk-go-v2/service/sts => github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 github.com/aws/smithy-go => github.com/aws/smithy-go v1.20.3 @@ -192,7 +192,7 @@ replace ( github.com/jstemmer/go-junit-report => github.com/jstemmer/go-junit-report 
v1.0.0 github.com/kisielk/errcheck => github.com/kisielk/errcheck v1.7.0 github.com/kisielk/gotool => github.com/kisielk/gotool v1.0.0 - github.com/klauspost/compress => github.com/klauspost/compress v1.17.10-0.20240805110405-8b81499bfd70 + github.com/klauspost/compress => github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b github.com/klauspost/cpuid/v2 => github.com/klauspost/cpuid/v2 v2.2.8 github.com/kpango/fastime => github.com/kpango/fastime v1.1.9 github.com/kpango/fuid => github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1 @@ -295,8 +295,8 @@ replace ( go.uber.org/zap => go.uber.org/zap v1.27.0 gocloud.dev => gocloud.dev v0.38.0 golang.org/x/crypto => golang.org/x/crypto v0.26.0 - golang.org/x/exp => golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/exp/typeparams => golang.org/x/exp/typeparams v0.0.0-20240719175910-8a7402abbf56 + golang.org/x/exp => golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa + golang.org/x/exp/typeparams => golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa golang.org/x/image => golang.org/x/image v0.19.0 golang.org/x/lint => golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/mobile => golang.org/x/mobile v0.0.0-20240806205939-81131f6468ab @@ -304,7 +304,7 @@ replace ( golang.org/x/net => golang.org/x/net v0.28.0 golang.org/x/oauth2 => golang.org/x/oauth2 v0.22.0 golang.org/x/sync => golang.org/x/sync v0.8.0 - golang.org/x/sys => golang.org/x/sys v0.23.0 + golang.org/x/sys => golang.org/x/sys v0.24.0 golang.org/x/term => golang.org/x/term v0.23.0 golang.org/x/text => golang.org/x/text v0.17.0 golang.org/x/time => golang.org/x/time v0.6.0 @@ -316,9 +316,9 @@ replace ( gonum.org/v1/plot => gonum.org/v1/plot v0.14.0 google.golang.org/api => google.golang.org/api v0.191.0 google.golang.org/appengine => google.golang.org/appengine v1.6.8 - google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240805194559-2c9e96a0b5d4 - google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240805194559-2c9e96a0b5d4 - google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240805194559-2c9e96a0b5d4 + google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 + google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988 + google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 google.golang.org/grpc => google.golang.org/grpc v1.65.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc => google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/protobuf => google.golang.org/protobuf v1.34.2 @@ -336,10 +336,10 @@ replace ( k8s.io/client-go => k8s.io/client-go v0.30.3 k8s.io/component-base => k8s.io/component-base v0.30.3 k8s.io/klog/v2 => k8s.io/klog/v2 v2.130.1 - k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240730131305-7a9a4e85957e + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240812233141-91dab695df6f k8s.io/kubernetes => k8s.io/kubernetes v0.30.3 k8s.io/metrics => k8s.io/metrics v0.30.3 - nhooyr.io/websocket => nhooyr.io/websocket v1.8.11 + nhooyr.io/websocket => nhooyr.io/websocket v1.8.17 rsc.io/pdf => rsc.io/pdf v0.1.1 sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/json => sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd @@ -399,7 +399,7 @@ require ( golang.org/x/net v0.28.0 golang.org/x/oauth2 
v0.22.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.23.0 + golang.org/x/sys v0.24.0 golang.org/x/text v0.17.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 @@ -422,7 +422,7 @@ require ( require ( cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.3 // indirect + cloud.google.com/go/auth v0.8.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect cloud.google.com/go/iam v1.1.12 // indirect @@ -506,14 +506,14 @@ require ( go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect golang.org/x/exp/typeparams v0.0.0-20240213143201-ec583247a57a // indirect golang.org/x/image v0.19.0 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/api v0.189.0 // indirect + google.golang.org/api v0.191.0 // indirect google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 106b02efec4..04313bdd063 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,9 @@ cloud.google.com/go/auth v0.6.0/go.mod h1:b4acV+jLQDyjwm4OXHYjNvRi4jvGBzHWJRtJcy cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4= cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= -cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= +cloud.google.com/go/auth v0.8.0 h1:y8jUJLl/Fg+qNBWxP/Hox2ezJvjkrPb952PC1p0G6A4= +cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= @@ -45,7 +46,7 @@ cloud.google.com/go/channel v1.17.11/go.mod h1:gjWCDBcTGQce/BSMoe2lAqhlq0dIRiZuk cloud.google.com/go/cloudbuild v1.16.5/go.mod h1:HXLpZ8QeYZgmDIWpbl9Gs22p6o6uScgQ/cV9HF9cIZU= cloud.google.com/go/clouddms v1.7.10/go.mod h1:PzHELq0QDyA7VaD9z6mzh2mxeBz4kM6oDe8YxMxd4RA= cloud.google.com/go/cloudtasks v1.12.12/go.mod h1:8UmM+duMrQpzzRREo0i3x3TrFjsgI/3FQw3664/JblA= -cloud.google.com/go/compute v1.27.4/go.mod h1:7JZS+h21ERAGHOy5qb7+EPyXlQwzshzrx1x6L9JhTqU= +cloud.google.com/go/compute v1.27.5/go.mod h1:DfwDGujFTdSeiE8b8ZqadF/uxHFBz+ekGsk8Zfi9dTA= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= @@ -84,12 +85,12 @@ cloud.google.com/go/gkehub v0.14.11/go.mod h1:CsmDJ4qbBnSPkoBltEubK6qGOjG0xNfeeT cloud.google.com/go/gkemulticloud v1.2.4/go.mod h1:PjTtoKLQpIRztrL+eKQw8030/S4c7rx/WvHydDJlpGE= cloud.google.com/go/grafeas v0.3.6/go.mod 
h1:to6ECAPgRO2xeqD8ISXHc70nObJuaKZThreQOjeOH3o= cloud.google.com/go/gsuiteaddons v1.6.11/go.mod h1:U7mk5PLBzDpHhgHv5aJkuvLp9RQzZFpa8hgWAB+xVIk= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= +cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= cloud.google.com/go/iap v1.9.10/go.mod h1:pO0FEirrhMOT1H0WVwpD5dD9r3oBhvsunyBQtNXzzc0= cloud.google.com/go/ids v1.4.11/go.mod h1:+ZKqWELpJm8WcRRsSvKZWUdkriu4A3XsLLzToTv3418= cloud.google.com/go/iot v1.7.11/go.mod h1:0vZJOqFy9kVLbUXwTP95e0dWHakfR4u5IWqsKMGIfHk= -cloud.google.com/go/kms v1.18.4/go.mod h1:SG1bgQ3UWW6/KdPo9uuJnzELXY5YTTMJtDYvajiQ22g= +cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY= cloud.google.com/go/language v1.13.0/go.mod h1:B9FbD17g1EkilctNGUDAdSrBHiFOlKNErLljO7jplDU= cloud.google.com/go/lifesciences v0.9.11/go.mod h1:NMxu++FYdv55TxOBEvLIhiAvah8acQwXsz79i9l9/RY= cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= @@ -108,7 +109,7 @@ cloud.google.com/go/maps v1.11.6/go.mod h1:MOS/NN0L6b7Kumr8bLux9XTpd8+D54DYxBMUj cloud.google.com/go/mediatranslation v0.8.11/go.mod h1:3sNEm0fx61eHk7rfzBzrljVV9XKr931xI3OFacQBVFg= cloud.google.com/go/memcache v1.10.11/go.mod h1:ubJ7Gfz/xQawQY5WO5pht4Q0dhzXBFeEszAeEJnwBHU= cloud.google.com/go/metastore v1.13.10/go.mod h1:RPhMnBxUmTLT1fN7fNbPqtH5EoGHueDxubmJ1R1yT84= -cloud.google.com/go/monitoring v1.20.3/go.mod h1:GPIVIdNznIdGqEjtRKQWTLcUeRnPjZW85szouimiczU= +cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c= cloud.google.com/go/networkconnectivity v1.14.10/go.mod h1:f7ZbGl4CV08DDb7lw+NmMXQTKKjMhgCEEwFbEukWuOY= cloud.google.com/go/networkmanagement v1.13.6/go.mod h1:WXBijOnX90IFb6sberjnGrVtZbgDNcPDUYOlGXmG8+4= cloud.google.com/go/networksecurity v0.9.11/go.mod h1:4xbpOqCwplmFgymAjPFM6ZIplVC6+eQ4m7sIiEq9oJA= @@ -134,7 +135,7 @@ cloud.google.com/go/resourcesettings v1.7.4/go.mod h1:seBdLuyeq+ol2u9G2+74GkSjQa cloud.google.com/go/retail v1.17.4/go.mod h1:oPkL1FzW7D+v/hX5alYIx52ro2FY/WPAviwR1kZZTMs= cloud.google.com/go/run v1.4.0/go.mod h1:4G9iHLjdOC+CQ0CzA0+6nLeR6NezVPmlj+GULmb0zE4= cloud.google.com/go/scheduler v1.10.12/go.mod h1:6DRtOddMWJ001HJ6MS148rtLSh/S2oqd2hQC3n5n9fQ= -cloud.google.com/go/secretmanager v1.13.5/go.mod h1:/OeZ88l5Z6nBVilV0SXgv6XJ243KP2aIhSWRMrbvDCQ= +cloud.google.com/go/secretmanager v1.13.6/go.mod h1:x2ySyOrqv3WGFRFn2Xk10iHmNmvmcEVSSqc30eb1bhw= cloud.google.com/go/security v1.17.4/go.mod h1:KMuDJH+sEB3KTODd/tLJ7kZK+u2PQt+Cfu0oAxzIhgo= cloud.google.com/go/securitycenter v1.33.1/go.mod h1:jeFisdYUWHr+ig72T4g0dnNCFhRwgwGoQV6GFuEwafw= cloud.google.com/go/servicedirectory v1.11.11/go.mod h1:pnynaftaj9LmRLIc6t3r7r7rdCZZKKxui/HaF/RqYfs= @@ -147,7 +148,7 @@ cloud.google.com/go/storagetransfer v1.10.10/go.mod h1:8+nX+WgQ2ZJJnK8e+RbK/zCXk cloud.google.com/go/talent v1.6.12/go.mod h1:nT9kNVuJhZX2QgqKZS6t6eCWZs5XEBYRBv6bIMnPmo4= cloud.google.com/go/texttospeech v1.7.11/go.mod h1:Ua125HU+WT2IkIo5MzQtuNpNEk72soShJQVdorZ1SAE= cloud.google.com/go/tpu v1.6.11/go.mod h1:W0C4xaSj1Ay3VX/H96FRvLt2HDs0CgdRPVI4e7PoCDk= -cloud.google.com/go/trace v1.10.11/go.mod h1:fUr5L3wSXerNfT0f1bBg08W4axS2VbHGgYcfH4KuTXU= +cloud.google.com/go/trace v1.10.12/go.mod h1:tYkAIta/gxgbBZ/PIzFxSH5blajgX4D00RpQqCG/GZs= cloud.google.com/go/translate v1.10.3/go.mod 
h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs= cloud.google.com/go/translate v1.10.7/go.mod h1:mH/+8tvcItuy1cOWqU+/Y3iFHgkVUObNIQYI/kiFFiY= cloud.google.com/go/video v1.22.0/go.mod h1:CxPshUNAb1ucnzbtruEHlAal9XY+SPG2cFqC/woJzII= @@ -159,8 +160,8 @@ cloud.google.com/go/vpcaccess v1.7.11/go.mod h1:a2cuAiSCI4TVK0Dt6/dRjf22qQvfY+po cloud.google.com/go/webrisk v1.9.11/go.mod h1:mK6M8KEO0ZI7VkrjCq3Tjzw4vYq+3c4DzlMUDVaiswE= cloud.google.com/go/websecurityscanner v1.6.11/go.mod h1:vhAZjksELSg58EZfUQ1BMExD+hxqpn0G0DuyCZQjiTg= cloud.google.com/go/workflows v1.12.10/go.mod h1:RcKqCiOmKs8wFUEf3EwWZPH5eHc7Oq0kamIyOUCk0IE= -code.cloudfoundry.org/bytefmt v0.0.0-20240806182212-6cf545ebdd6b h1:C+YxvUtePwiSzniQagI/yDdWjh1Lx1TkstHBGN7OTnA= -code.cloudfoundry.org/bytefmt v0.0.0-20240806182212-6cf545ebdd6b/go.mod h1:9aVxojRyikUaWddFMb0A9tgpGMtDPhk1pnCfhY0/fA4= +code.cloudfoundry.org/bytefmt v0.0.0-20240808182453-a379845013d9 h1:8KlrGCtoaWaaxVxi9KzED38kNIWa1qafh9bNSVZ6otk= +code.cloudfoundry.org/bytefmt v0.0.0-20240808182453-a379845013d9/go.mod h1:eF2ZbltNI7Pv+8Cuyeksu9up5FN5konuH0trDJBuscw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20221208032759-85de2813cf6b/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= eliasnaur.com/font v0.0.0-20230308162249-dd43949cb42d/go.mod h1:OYVuxibdk9OSLX8vAqydtRPP87PyTFcT9uH3MlEGBQA= @@ -484,8 +485,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.17.10-0.20240805110405-8b81499bfd70 h1:3rXiP8hu9oCOQ7ZcJO7x4Dh/r0yCkwpj7GGOB5fA20U= -github.com/klauspost/compress v1.17.10-0.20240805110405-8b81499bfd70/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b h1:4A/j6hb0Sd3VXqhNtgmUlcPy353Qaa0aIfAPcBrI1n8= +github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kpango/fastime v1.1.9 h1:xVQHcqyPt5M69DyFH7g1EPRns1YQNap9d5eLhl/Jy84= @@ -684,14 +685,14 @@ gocloud.dev v0.38.0 h1:SpxfaOc/Fp4PeO8ui7wRcCZV0EgXZ+IWcVSLn6ZMSw0= gocloud.dev v0.38.0/go.mod h1:3XjKvd2E5iVNu/xFImRzjN0d/fkNHe4s0RiKidpEUMQ= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/exp/shiny v0.0.0-20220827204233-334a2380cb91/go.mod h1:VjAR7z0ngyATZTELrBSkxOOHhhlnVUxDye4mcjx5h/8= golang.org/x/exp/shiny v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:UH99kUObWAZkDnWqppdQe5ZhPYESUw8I0zVV1uWBR+0= golang.org/x/exp/shiny 
v0.0.0-20230817173708-d852ddb80c63/go.mod h1:UH99kUObWAZkDnWqppdQe5ZhPYESUw8I0zVV1uWBR+0= golang.org/x/exp/shiny v0.0.0-20240707233637-46b078467d37/go.mod h1:3F+MieQB7dRYLTmnncoFbb1crS5lfQoTfDgQy6K4N0o= -golang.org/x/exp/typeparams v0.0.0-20240719175910-8a7402abbf56 h1:i+QrZdyNyfLEnWjd5T6LQZvQP3xk2XiNs3sQgN7QDGE= -golang.org/x/exp/typeparams v0.0.0-20240719175910-8a7402abbf56/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa h1:54T+HVkPu4D3lltpEHyI3Fs2pG/GqjGkXLgyKOmifXk= +golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.19.0 h1:D9FX4QWkLfkeqaC62SonffIIuYdOk/UE2XKUBgRIBIQ= golang.org/x/image v0.19.0/go.mod h1:y0zrRqlQRWQ5PXaYCOMLTW2fpsxZ8Qh9I/ohnInJEys= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -704,8 +705,8 @@ golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= @@ -728,13 +729,13 @@ gonum.org/v1/plot v0.14.0/go.mod h1:MLdR9424SJed+5VqC6MsouEpig9pZX2VZ57H9ko2bXU= google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk= google.golang.org/api v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20240805194559-2c9e96a0b5d4 h1:g+rQ3aqOyXK/0qwnC5TGUXnyIeipstP5SsniB9uPJ2c= -google.golang.org/genproto v0.0.0-20240805194559-2c9e96a0b5d4/go.mod h1:7uvplUBj4RjHAxIZ//98LzOvrQ04JBkaixRmCMI29hc= -google.golang.org/genproto/googleapis/api v0.0.0-20240805194559-2c9e96a0b5d4 h1:ABEBT/sZ7We8zd7A5f3KO6zMQe+s3901H7l8Whhijt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240805194559-2c9e96a0b5d4/go.mod h1:4+X6GvPs+25wZKbQq9qyAXrwIRExv7w0Ea6MgZLZiDM= +google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 h1:CT2Thj5AuPV9phrYMtzX11k+XkzMGfRAet42PmoTATM= +google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988/go.mod h1:7uvplUBj4RjHAxIZ//98LzOvrQ04JBkaixRmCMI29hc= +google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988 h1:+/tmTy5zAieooKIXfzDm9KiA3Bv6JBwriRN9LY+yayk= +google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988/go.mod h1:4+X6GvPs+25wZKbQq9qyAXrwIRExv7w0Ea6MgZLZiDM= google.golang.org/genproto/googleapis/bytestream v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:5/MT647Cn/GGhwTpXC7QqcaR5Cnee4v4MKCU1/nwnIQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240805194559-2c9e96a0b5d4 h1:OsSGQeIIsyOEOimVxLEIL4rwGcnrjOydQaiA2bOnZUM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240805194559-2c9e96a0b5d4/go.mod 
h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 h1:V71AcdLZr2p8dC9dbOIMCpqi4EmRl8wUwnJzXXLmbmc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
@@ -764,8 +765,8 @@ k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k=
k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240730131305-7a9a4e85957e h1:OnKkExfhk4yxMqvBSPzUfhv3zQ96FWJ+UOZzLrAFyAo=
-k8s.io/kube-openapi v0.0.0-20240730131305-7a9a4e85957e/go.mod h1:0CVn9SVo8PeW5/JgsBZZIFmmTk5noOM8WXf2e1tCihE=
+k8s.io/kube-openapi v0.0.0-20240812233141-91dab695df6f h1:bnWtxXWdAl5bVOCEPoNdvMkyj6cTW3zxHuwKIakuV9w=
+k8s.io/kube-openapi v0.0.0-20240812233141-91dab695df6f/go.mod h1:G0W3eI9gG219NHRq3h5uQaRBl4pj4ZpwzRP5ti8y770=
k8s.io/metrics v0.30.3 h1:gKCpte5zykrOmQhZ8qmsxyJslMdiLN+sqbBfIWNpbGM=
k8s.io/metrics v0.30.3/go.mod h1:W06L2nXRhOwPkFYDJYWdEIS3u6JcJy3ebIPYbndRs6A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
diff --git a/hack/cspell/main.go b/hack/cspell/main.go
new file mode 100644
index 00000000000..631654dd65c
--- /dev/null
+++ b/hack/cspell/main.go
@@ -0,0 +1,277 @@
+package main
+
+import (
+ "bufio"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+type Override struct {
+ Filename string `json:"filename"`
+ IgnoreWords []string `json:"ignoreWords,omitempty"`
+}
+
+type CspellConfig struct {
+ Import []string `json:"import"`
+ IgnorePaths []string `json:"ignorePaths"`
+ IgnoreWords []string `json:"ignoreWords,omitempty"`
+ IgnoreRegExpList []string `json:"ignoreRegExpList,omitempty"`
+ Overrides []Override `json:"overrides"`
+}
+
+func main() {
+ var cspellOutputFile string
+ var configFile string
+ var wordThreshold int
+
+ flag.StringVar(&cspellOutputFile, "output", "", "Path to the cspell output file")
+ flag.StringVar(&configFile, "config", ".cspell.json", "Path to the cspell config file")
+ flag.IntVar(&wordThreshold, "threshold", 10, "Word frequency threshold for adding to ignoreWords")
+
+ flag.Parse()
+
+ if cspellOutputFile == "" || configFile == "" {
+ flag.Usage()
+ os.Exit(1)
+ }
+
+ // default configuration
+ config := CspellConfig{
+ Import: []string{
+ "@cspell/dict-cpp/cspell-ext.json",
+ "@cspell/dict-docker/cspell-ext.json",
+ "@cspell/dict-en_us/cspell-ext.json",
+ "@cspell/dict-fullstack/cspell-ext.json",
+ "@cspell/dict-git/cspell-ext.json",
+ "@cspell/dict-golang/cspell-ext.json",
+ "@cspell/dict-k8s/cspell-ext.json",
+ "@cspell/dict-makefile/cspell-ext.json",
+ "@cspell/dict-markdown/cspell-ext.json",
+ "@cspell/dict-npm/cspell-ext.json",
+ "@cspell/dict-public-licenses/cspell-ext.json",
+ "@cspell/dict-rust/cspell-ext.json",
+ "@cspell/dict-shell/cspell-ext.json",
+ },
+ IgnorePaths: []string{
+ "**/*.ai",
+ "**/*.drawio",
+ "**/*.hdf5",
+ "**/*.key",
+ "**/*.lock",
+ "**/*.log",
+ "**/*.md5",
+ "**/*.pack",
+ "**/*.pdf",
+ "**/*.pem",
+ "**/*.png",
+ "**/*.sum",
+ "**/*.svg",
+ "**/.git/objects/**",
+ "**/cmd/agent/core/faiss/faiss",
+ "**/cmd/agent/core/ngt/ngt",
+ "**/cmd/agent/sidecar/sidecar",
+ "**/cmd/discoverer/k8s/discoverer",
+ "**/cmd/gateway/filter/filter",
+ "**/cmd/gateway/lb/lb",
+ "**/cmd/gateway/mirror/mirror",
+ "**/cmd/index/job/correction/index-correction",
+ "**/cmd/index/job/creation/index-creation",
+ "**/cmd/index/job/readreplica/rotate/readreplica-rotate",
+ "**/cmd/index/job/save/index-save",
+ "**/cmd/index/operator/index-operator",
+ "**/cmd/manager/index/index",
+ "**/cmd/tools/benchmark/job/job",
+ "**/cmd/tools/benchmark/operator/operator",
+ "**/cmd/tools/cli/loadtest/loadtest",
+ "**/internal/core/algorithm/ngt/assets/index",
+ "**/internal/test/data/agent/ngt/validIndex",
+ },
+ }
+
+ // load the existing .cspell.json if it exists
+ if _, err := os.Stat(configFile); err == nil {
+ data, err := os.ReadFile(configFile)
+ if err != nil {
+ fmt.Printf("Error reading existing cspell config file: %v\n", err)
+ os.Exit(1)
+ }
+ if err := json.Unmarshal(data, &config); err != nil {
+ fmt.Printf("Error parsing existing cspell config file: %v\n", err)
+ os.Exit(1)
+ }
+ }
+
+ file, err := os.Open(cspellOutputFile)
+ if err != nil {
+ fmt.Printf("Error opening cspell output file: %v\n", err)
+ os.Exit(1)
+ }
+ defer file.Close()
+
+ wordRegex := regexp.MustCompile(`Unknown word \(([^)]+)\)`)
+ fileWordsMap := make(map[string]map[string]struct{})
+ wordFrequency := make(map[string]int)
+ wordVariations := make(map[string]map[string]struct{})
+ suffixes := []string{
+ "addr", "addrs", "buf", "cancel", "cfg", "ch", "cnt", "conf", "conn", "ctx",
+ "dim", "dur", "env", "err", "error", "errors", "errs", "group", "idx",
+ "len", "mu", "opt", "opts", "pool", "req", "res", "size", "vec",
+ }
+
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.SplitN(line, ":", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ filename := parts[0]
+ wordMatch := wordRegex.FindStringSubmatch(line)
+ if len(wordMatch) == 2 {
+ word := wordMatch[1]
+ wordLower := strings.ToLower(word) // convert to lowercase so that case is ignored
+
+ if _, exists := fileWordsMap[filename]; !exists {
+ fileWordsMap[filename] = make(map[string]struct{})
+ }
+ if _, alreadyCounted := fileWordsMap[filename][wordLower]; !alreadyCounted {
+ wordFrequency[wordLower]++
+ }
+ fileWordsMap[filename][wordLower] = struct{}{}
+
+ // record the case variations of the word
+ if _, exists := wordVariations[wordLower]; !exists {
+ wordVariations[wordLower] = make(map[string]struct{})
+ }
+ wordVariations[wordLower][word] = struct{}{}
+ }
+ }
+
+ globalIgnoreWords := map[string]struct{}{}
+ globalIgnoreRegExpList := map[string]struct{}{}
+
+ for wordLower, count := range wordFrequency {
+ if count >= wordThreshold {
+ matchedSuffix := false
+ for _, suffix := range suffixes {
+ if strings.HasSuffix(wordLower, suffix) && len(wordLower) > len(suffix) {
+ globalIgnoreRegExpList[fmt.Sprintf(".*%s$", suffix)] = struct{}{}
+ matchedSuffix = true
+ break
+ }
+ }
+ if !matchedSuffix {
+ // decide which ignoreWords to register based on the recorded variations
+ for variation := range wordVariations[wordLower] {
+ globalIgnoreWords[variation] = struct{}{}
+ }
+ }
+ }
+ }
+
+ // set ignoreWords for each file
+ for filename, words := range fileWordsMap {
+ wordList := []string{}
+ for wordLower := range words {
+ matchedSuffix := false
+ for _, suffix := range suffixes {
+ if strings.HasSuffix(wordLower, suffix) && len(wordLower) > len(suffix) {
+ matchedSuffix = true
+ break
+ }
+ }
+ // words that match a suffix pattern are not added to the per-file ignoreWords
+ if !matchedSuffix {
+ for variation := range wordVariations[wordLower] {
+ if _, isGlobal := globalIgnoreWords[variation]; !isGlobal {
+ wordList = append(wordList, variation)
+ }
+ }
+ }
+ }
+
+ if len(wordList) == 0 {
+ continue
+ }
+
+ overrideFound := false
+ for i, override := range config.Overrides {
+ if override.Filename == filename {
+ config.Overrides[i].IgnoreWords = append(config.Overrides[i].IgnoreWords, wordList...)
+ config.Overrides[i].IgnoreWords = unique(config.Overrides[i].IgnoreWords)
+ overrideFound = true
+ break
+ }
+ }
+ if !overrideFound {
+ config.Overrides = append(config.Overrides, Override{
+ Filename: filename,
+ IgnoreWords: unique(wordList),
+ })
+ }
+ }
+
+ // remove overrides whose IgnoreWords ended up empty
+ filteredOverrides := []Override{}
+ for _, override := range config.Overrides {
+ if len(override.IgnoreWords) > 0 {
+ filteredOverrides = append(filteredOverrides, override)
+ }
+ }
+ config.Overrides = filteredOverrides
+
+ // sort Overrides by Filename
+ sort.Slice(config.Overrides, func(i, j int) bool {
+ return config.Overrides[i].Filename < config.Overrides[j].Filename
+ })
+
+ // remove duplicated IgnoreWords and sort them
+ for i := range config.Overrides {
+ config.Overrides[i].IgnoreWords = unique(config.Overrides[i].IgnoreWords)
+ sort.Strings(config.Overrides[i].IgnoreWords)
+ }
+
+ // handle the global ignoreWords and ignoreRegExpList in the same way
+ for word := range globalIgnoreWords {
+ config.IgnoreWords = append(config.IgnoreWords, word)
+ }
+ for pattern := range globalIgnoreRegExpList {
+ config.IgnoreRegExpList = append(config.IgnoreRegExpList, pattern)
+ }
+ config.IgnoreWords = unique(config.IgnoreWords)
+ sort.Strings(config.IgnoreWords)
+ config.IgnoreRegExpList = unique(config.IgnoreRegExpList)
+ sort.Strings(config.IgnoreRegExpList)
+
+ // write the updated configuration to the file
+ outputData, err := json.MarshalIndent(config, "", " ")
+ if err != nil {
+ fmt.Printf("Error marshaling cspell config to JSON: %v\n", err)
+ os.Exit(1)
+ }
+
+ if err := os.WriteFile(configFile, outputData, 0644); err != nil {
+ fmt.Printf("Error writing cspell config file: %v\n", err)
+ os.Exit(1)
+ }
+
+ fmt.Printf("cspell configuration updated successfully and saved to %s\n", configFile)
+}
+
+// unique removes duplicates from a slice of strings
+func unique(slice []string) []string {
+ seen := map[string]struct{}{}
+ result := []string{}
+ for _, item := range slice {
+ if _, found := seen[item]; !found {
+ seen[item] = struct{}{}
+ result = append(result, item)
+ }
+ }
+ return result
+}
diff --git a/hack/docker/gen/main.go b/hack/docker/gen/main.go
index 7750270efc8..34e073c4208 100644
--- a/hack/docker/gen/main.go
+++ b/hack/docker/gen/main.go
@@ -223,6 +223,9 @@ const (
organization = "vdaas"
repository = "vald"
defaultBinaryDir = "/usr/bin"
+ usrLocal = "/usr/local"
+ usrLocalBinaryDir = usrLocal + "/bin"
+ usrLocalLibDir = usrLocal + "/lib"
defaultBuilderImage = "ghcr.io/vdaas/vald/vald-buildbase"
defaultBuilderTag = "nightly"
defaultLanguage = "en_US.UTF-8"
@@ -230,7 +233,8 @@ const (
defaultRuntimeImage = "gcr.io/distroless/static"
defaultRuntimeTag = "nonroot"
defaultRuntimeUser = "nonroot:nonroot"
- defaultBuildUser = "root:root"
+ rootUser = "root"
+ defaultBuildUser = rootUser + ":" + rootUser
defaultBuildStageName = "builder"
maintainerKey = "MAINTAINER"
minimumArgumentLength = 2
@@ -274,28 +278,28 @@ var (
defaultEnvironments = map[string]string{
"DEBIAN_FRONTEND": "noninteractive",
- "HOME": "/root",
- "USER": "root",
+ "HOME": "/" + rootUser,
+ "USER": rootUser,
"INITRD": "No",
"LANG": defaultLanguage,
"LANGUAGE": defaultLanguage,
"LC_ALL": defaultLanguage,
"ORG": organization,
"TZ": "Etc/UTC",
- "PATH":
"${PATH}:/usr/local/bin", + "PATH": "${PATH}:" + usrLocalBinaryDir, "REPO": repository, } goDefaultEnvironments = map[string]string{ "GOROOT": "/opt/go", "GOPATH": "/go", "GO111MODULE": "on", - "PATH": "${PATH}:${GOROOT}/bin:${GOPATH}/bin:/usr/local/bin", + "PATH": "${PATH}:${GOROOT}/bin:${GOPATH}/bin:" + usrLocalBinaryDir, } rustDefaultEnvironments = map[string]string{ - "RUST_HOME": "/usr/loacl/lib/rust", + "RUST_HOME": usrLocalLibDir + "/rust", "RUSTUP_HOME": "${RUST_HOME}/rustup", "CARGO_HOME": "${RUST_HOME}/cargo", - "PATH": "${PATH}:${RUSTUP_HOME}/bin:${CARGO_HOME}/bin:/usr/local/bin", + "PATH": "${PATH}:${RUSTUP_HOME}/bin:${CARGO_HOME}/bin:" + usrLocalBinaryDir, } clangDefaultEnvironments = map[string]string{ "CC": "gcc", @@ -372,7 +376,6 @@ var ( "make kubelinter/install", "make reviewdog/install", "make tparse/install", - "make valdcli/install", "make yq/install", "make minikube/install", "make stern/install", @@ -597,7 +600,7 @@ func main() { "OPERATOR_SDK_VERSION": "latest", }, ExtraCopies: []string{ - "--from=operator /usr/local/bin/${APP_NAME} {{$.BinDir}}/${APP_NAME}", + "--from=operator " + usrLocalBinaryDir + "/${APP_NAME} {{$.BinDir}}/${APP_NAME}", }, ExtraImages: []string{ "quay.io/operator-framework/helm-operator:${OPERATOR_SDK_VERSION} AS operator", @@ -790,9 +793,9 @@ func main() { data.RootDir = "${HOME}" data.Environments["ROOTDIR"] = os.Args[1] } - if strings.Contains(data.BuildUser, "root") { - data.Environments["HOME"] = "/root" - data.Environments["USER"] = "root" + if strings.Contains(data.BuildUser, rootUser) { + data.Environments["HOME"] = "/" + rootUser + data.Environments["USER"] = rootUser } else { user := data.BuildUser if strings.Contains(user, ":") { diff --git a/internal/backoff/backoff_test.go b/internal/backoff/backoff_test.go index 0549983bbbe..1a57a004b21 100644 --- a/internal/backoff/backoff_test.go +++ b/internal/backoff/backoff_test.go @@ -288,7 +288,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return nil, false, err } @@ -317,7 +317,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -346,7 +346,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -380,7 +380,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -413,7 +413,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -442,7 +442,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -470,7 +470,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx, cancel := 
context.WithCancel(context.Background()) - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { cancel() return str, true, err @@ -499,7 +499,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx, cancel := context.WithCancel(context.Background()) - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -532,7 +532,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ diff --git a/internal/cache/gache/option_test.go b/internal/cache/gache/option_test.go index 1e35584599b..625e1229aa9 100644 --- a/internal/cache/gache/option_test.go +++ b/internal/cache/gache/option_test.go @@ -60,7 +60,7 @@ func TestDefaultOptions(t *testing.T) { tests := []test{ { - name: "set succuess", + name: "set success", want: want{ want: &cache[any]{ gache: gache.New[any](), @@ -122,7 +122,7 @@ func TestWithGache(t *testing.T) { func() test { ga := gache.New[any]() return test{ - name: "set succuess when g is not nil", + name: "set success when g is not nil", args: args{ g: ga, }, @@ -135,7 +135,7 @@ func TestWithGache(t *testing.T) { }(), func() test { return test{ - name: "set succuess when g is nil", + name: "set success when g is nil", want: want{ want: new(T), }, @@ -195,7 +195,7 @@ func TestWithExpiredHook(t *testing.T) { func() test { fn := func(context.Context, string) {} return test{ - name: "set succuess when f is not nil", + name: "set success when f is not nil", args: args{ f: fn, }, @@ -214,7 +214,7 @@ func TestWithExpiredHook(t *testing.T) { }(), func() test { return test{ - name: "set succuess when fn is nil", + name: "set success when fn is nil", want: want{ want: new(T), }, @@ -272,7 +272,7 @@ func TestWithExpireDuration(t *testing.T) { tests := []test{ { - name: "set succuess when dur is 0", + name: "set success when dur is 0", args: args{ dur: 0, }, @@ -281,7 +281,7 @@ func TestWithExpireDuration(t *testing.T) { }, }, { - name: "set succuess when dur is not 0", + name: "set success when dur is not 0", args: args{ dur: 10, }, @@ -343,7 +343,7 @@ func TestWithExpireCheckDuration(t *testing.T) { tests := []test{ { - name: "set succuess when dur is 0", + name: "set success when dur is 0", args: args{ dur: 0, }, @@ -352,7 +352,7 @@ func TestWithExpireCheckDuration(t *testing.T) { }, }, { - name: "set succuess when dur is not 0", + name: "set success when dur is not 0", args: args{ dur: 10, }, diff --git a/internal/cache/option.go b/internal/cache/option.go index ebedb19099e..ef5a7583ae0 100644 --- a/internal/cache/option.go +++ b/internal/cache/option.go @@ -55,7 +55,7 @@ func WithType[V any](mo string) Option[V] { } } -// WithExpireDuration returns Option after set expireDur when dur is cprrect param. +// WithExpireDuration returns Option after set expireDur when dur is correct param. func WithExpireDuration[V any](dur string) Option[V] { return func(c *cache[V]) { if len(dur) == 0 { @@ -69,7 +69,7 @@ func WithExpireDuration[V any](dur string) Option[V] { } } -// WithExpireCheckDuration returns Option after set expireCheckDur when dur is cprrect param. +// WithExpireCheckDuration returns Option after set expireCheckDur when dur is correct param. 
func WithExpireCheckDuration[V any](dur string) Option[V] { return func(c *cache[V]) { if len(dur) == 0 { diff --git a/internal/circuitbreaker/breaker.go b/internal/circuitbreaker/breaker.go index 29c499c000c..81f3a455efa 100644 --- a/internal/circuitbreaker/breaker.go +++ b/internal/circuitbreaker/breaker.go @@ -35,7 +35,7 @@ type breaker struct { minSamples int64 openTimeout time.Duration openExp int64 // unix time - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 // unix time } @@ -172,7 +172,7 @@ func (b *breaker) currentState() State { func (b *breaker) reset() { atomic.StoreInt32(&b.tripped, 0) atomic.StoreInt64(&b.openExp, 0) - atomic.StoreInt64(&b.closedRefreshExp, time.Now().Add(b.cloedRefreshTimeout).UnixNano()) + atomic.StoreInt64(&b.closedRefreshExp, time.Now().Add(b.closedRefreshTimeout).UnixNano()) b.count.reset() } diff --git a/internal/circuitbreaker/breaker_test.go b/internal/circuitbreaker/breaker_test.go index 234e6c2cb61..d0fd5ef680d 100644 --- a/internal/circuitbreaker/breaker_test.go +++ b/internal/circuitbreaker/breaker_test.go @@ -35,7 +35,7 @@ func Test_breaker_isReady(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct { @@ -162,7 +162,7 @@ func Test_breaker_isReady(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } @@ -186,7 +186,7 @@ func Test_breaker_success(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct{} @@ -281,7 +281,7 @@ func Test_breaker_success(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } if test.afterFunc != nil { @@ -308,7 +308,7 @@ func Test_breaker_fail(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct{} @@ -409,7 +409,7 @@ func Test_breaker_fail(t *testing.T) { t.Errorf("state changed: %d", b.tripped) } if total := b.count.Total(); total == 0 { - t.Errorf("count reseted: %d", total) + t.Errorf("count resetted: %d", total) } }, } @@ -439,7 +439,7 @@ func Test_breaker_fail(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } if test.afterFunc != nil { @@ -564,7 +564,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -613,7 +613,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: 
want{}, @@ -647,7 +647,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -689,7 +689,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -713,7 +713,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -749,7 +749,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -779,7 +779,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -821,7 +821,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -845,7 +845,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct{} @@ -876,7 +876,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -906,7 +906,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -948,7 +948,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -972,7 +972,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct{} @@ -1003,7 +1003,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1033,7 +1033,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1075,7 +1075,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: 
test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -1099,7 +1099,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -1135,7 +1135,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1165,7 +1165,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1207,7 +1207,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // diff --git a/internal/circuitbreaker/options.go b/internal/circuitbreaker/options.go index f48337a18e5..9b02e8abe08 100644 --- a/internal/circuitbreaker/options.go +++ b/internal/circuitbreaker/options.go @@ -131,7 +131,7 @@ func WithClosedRefreshTimeout(timeout string) BreakerOption { if err != nil { return errors.NewErrInvalidOption("closedRefreshTimeout", timeout, err) } - b.cloedRefreshTimeout = d + b.closedRefreshTimeout = d return nil } } diff --git a/internal/client/v1/client/vald/vald.go b/internal/client/v1/client/vald/vald.go index 00957b7b7b4..280aa78be8d 100644 --- a/internal/client/v1/client/vald/vald.go +++ b/internal/client/v1/client/vald/vald.go @@ -499,6 +499,28 @@ func (c *client) MultiUpdate( return res, nil } +func (c *client) UpdateTimestamp( + ctx context.Context, in *payload.Object_Timestamp, opts ...grpc.CallOption, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "internal/client/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + _, err = c.c.RoundRobin(ctx, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + res, err = vald.NewValdClient(conn).UpdateTimestamp(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + func (c *client) Upsert( ctx context.Context, in *payload.Upsert_Request, opts ...grpc.CallOption, ) (res *payload.Object_Location, err error) { @@ -1066,6 +1088,18 @@ func (c *singleClient) Update( return c.vc.Update(ctx, in, opts...) } +func (c *singleClient) UpdateTimestamp( + ctx context.Context, in *payload.Object_Timestamp, opts ...grpc.CallOption, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "internal/singleClient/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + return c.vc.UpdateTimestamp(ctx, in, opts...) 
+} + func (c *singleClient) StreamUpdate( ctx context.Context, opts ...grpc.CallOption, ) (res vald.Update_StreamUpdateClient, err error) { diff --git a/internal/compress/gob_test.go b/internal/compress/gob_test.go index 68545c880f5..ef38462c706 100644 --- a/internal/compress/gob_test.go +++ b/internal/compress/gob_test.go @@ -391,7 +391,7 @@ func Test_gobCompressor_Reader(t *testing.T) { src io.ReadCloser } type fields struct { - transcodr gob.Transcoder + transcoder gob.Transcoder } type want struct { want io.ReadCloser @@ -425,7 +425,7 @@ func Test_gobCompressor_Reader(t *testing.T) { src: rc, }, fields: fields{ - transcodr: &gob.MockTranscoder{ + transcoder: &gob.MockTranscoder{ NewDecoderFunc: func(r io.Reader) gob.Decoder { return dec }, @@ -457,7 +457,7 @@ func Test_gobCompressor_Reader(t *testing.T) { checkFunc = defaultCheckFunc } g := &gobCompressor{ - transcoder: test.fields.transcodr, + transcoder: test.fields.transcoder, } got, err := g.Reader(test.args.src) diff --git a/internal/compress/lz4_test.go b/internal/compress/lz4_test.go index a54240d66a6..989cf1c240e 100644 --- a/internal/compress/lz4_test.go +++ b/internal/compress/lz4_test.go @@ -476,14 +476,14 @@ func Test_lz4Compressor_DecompressVector(t *testing.T) { }, }, { - name: "returns (nil, error) when decompresse fails", + name: "returns (nil, error) when decompress fails", args: args{ bs: []byte("vdaas/vald"), }, fields: fields{ gobc: &MockCompressor{ DecompressVectorFunc: func(bytes []byte) (vector []float32, err error) { - return nil, errors.New("decompresse err") + return nil, errors.New("decompress err") }, }, compressionLevel: 0, @@ -497,7 +497,7 @@ func Test_lz4Compressor_DecompressVector(t *testing.T) { }, want: want{ want: nil, - err: errors.New("decompresse err"), + err: errors.New("decompress err"), }, }, } @@ -653,7 +653,7 @@ func Test_lz4Compressor_Writer(t *testing.T) { w = new(lz4.MockWriter) ) return test{ - name: "returns (io.WriteCloser, nil) when no erro occurs", + name: "returns (io.WriteCloser, nil) when no error occurs", args: args{ dst: dst, }, diff --git a/internal/config/cassandra_test.go b/internal/config/cassandra_test.go index cfe19674983..8ffad551325 100644 --- a/internal/config/cassandra_test.go +++ b/internal/config/cassandra_test.go @@ -283,7 +283,7 @@ func TestCassandra_Bind(t *testing.T) { key := "CASSANDRA_BIND_PASSWORD" val := "cassandra_password" return test{ - name: "return Cassandra struct when Password is set via the envirionment value", + name: "return Cassandra struct when Password is set via the environment value", fields: fields{ Password: "_" + key + "_", }, diff --git a/internal/config/log.go b/internal/config/log.go index 4f8ca0c6020..908e89c59c3 100644 --- a/internal/config/log.go +++ b/internal/config/log.go @@ -24,7 +24,7 @@ type Logging struct { Format string `json:"format" yaml:"format"` } -// Bind returns Logging object whose every value is field value or envirionment value. +// Bind returns Logging object whose every value is field value or environment value. 
func (l *Logging) Bind() *Logging { l.Logger = GetActualValue(l.Logger) l.Level = GetActualValue(l.Level) diff --git a/internal/core/algorithm/ngt/ngt_test.go b/internal/core/algorithm/ngt/ngt_test.go index 0186f2abed1..db73502cc02 100644 --- a/internal/core/algorithm/ngt/ngt_test.go +++ b/internal/core/algorithm/ngt/ngt_test.go @@ -103,7 +103,7 @@ func TestNew(t *testing.T) { beforeFunc func(args) afterFunc func(*testing.T, NGT) error } - defaultComprators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { + defaultComparators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { return s1 == s2 }))) defaultCheckFunc := func(w want, got NGT, err error, comparators ...comparator.Option) error { @@ -234,7 +234,7 @@ func TestNew(t *testing.T) { } comparators := test.comparators if test.comparators == nil || len(test.comparators) == 0 { - comparators = defaultComprators + comparators = defaultComparators } got, err := New(test.args.opts...) @@ -692,7 +692,7 @@ func Test_gen(t *testing.T) { beforeFunc func(*testing.T, args) afterFunc func(*testing.T, NGT) error } - defaultComprators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { + defaultComparators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { return s1 == s2 }))) defaultCheckFunc := func(_ context.Context, w want, got NGT, err error, comparators ...comparator.Option) error { @@ -839,7 +839,7 @@ func Test_gen(t *testing.T) { } comparators := test.comparators if test.comparators == nil || len(test.comparators) == 0 { - comparators = defaultComprators + comparators = defaultComparators } got, err := gen(test.args.isLoad, test.args.opts...) @@ -1049,7 +1049,7 @@ func Test_ngt_loadOptions(t *testing.T) { }, }, { - name: "load option failed with Ignoreable error", + name: "load option failed with Ignorable error", args: args{ opts: []Option{ func(n *ngt) error { @@ -1107,7 +1107,7 @@ func Test_ngt_loadOptions(t *testing.T) { func Test_ngt_create(t *testing.T) { // This test is skipped because it requires ngt.prop to be set probably. // We cannot initialize ngt.prop since it is C dependencies. - // This function is called by New(), and the ngt.prop is destoried in New(), so we cannot test this function individually. + // This function is called by New(), and the ngt.prop is destroyed in New(), so we cannot test this function individually. t.SkipNow() } @@ -1476,7 +1476,7 @@ func Test_ngt_Search(t *testing.T) { }, }, { - name: "resturn vector id after the nearby vector inserted (uint8)", + name: "return vector id after the nearby vector inserted (uint8)", args: args{ ctx: context.Background(), vec: []float32{1, 2, 3, 4, 5, 6, 7, 8, 9}, @@ -1653,7 +1653,7 @@ func Test_ngt_Search(t *testing.T) { }, }, { - name: "resturn vector id after the nearby vector inserted (float)", + name: "return vector id after the nearby vector inserted (float)", args: args{ ctx: context.Background(), vec: []float32{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.91}, diff --git a/internal/db/rdb/mysql/dbr/dbr.go b/internal/db/rdb/mysql/dbr/dbr.go index e123403154d..00702c9d6ba 100644 --- a/internal/db/rdb/mysql/dbr/dbr.go +++ b/internal/db/rdb/mysql/dbr/dbr.go @@ -18,7 +18,7 @@ package dbr import dbr "github.com/gocraft/dbr/v2" -// DBR repreesnts the interface to create connection to MySQL. +// DBR represents the interface to create connection to MySQL. 
type DBR interface { Open(driver, dsn string, log EventReceiver) (Connection, error) Eq(col string, val any) Builder diff --git a/internal/db/rdb/mysql/dbr/insert.go b/internal/db/rdb/mysql/dbr/insert.go index 8d96e916c8e..d15a9865fa0 100644 --- a/internal/db/rdb/mysql/dbr/insert.go +++ b/internal/db/rdb/mysql/dbr/insert.go @@ -34,13 +34,13 @@ type insertStmt struct { *dbr.InsertStmt } -// Columns set colums to the insertStmt. +// Columns set column to the insertStmt. func (stmt *insertStmt) Columns(column ...string) InsertStmt { stmt.InsertStmt = stmt.InsertStmt.Columns(column...) return stmt } -// ExecContext execure inserting to the database. +// ExecContext execute inserting to the database. func (stmt *insertStmt) ExecContext(ctx context.Context) (sql.Result, error) { return stmt.InsertStmt.ExecContext(ctx) } diff --git a/internal/db/rdb/mysql/dbr/session.go b/internal/db/rdb/mysql/dbr/session.go index c1d8f60918c..f6700947c15 100644 --- a/internal/db/rdb/mysql/dbr/session.go +++ b/internal/db/rdb/mysql/dbr/session.go @@ -39,7 +39,7 @@ func NewSession(conn Connection, event EventReceiver) Session { return conn.NewSession(event) } -// SeleSelect creates and returns the SelectStmt. +// Select creates and returns the SelectStmt. func (sess *session) Select(column ...string) SelectStmt { return &selectStmt{ sess.Session.Select(column...), @@ -56,7 +56,7 @@ func (sess *session) Begin() (Tx, error) { // Close closes the database and prevents new queries from starting. // Close then waits for all queries that have started processing on the server to finish. -// Close returns the errro if something goes worng during close. +// Close returns the error if something goes wrong during close. func (sess *session) Close() error { return sess.Session.Close() } diff --git a/internal/db/rdb/mysql/dbr/tx.go b/internal/db/rdb/mysql/dbr/tx.go index ce5dc2d02d7..3713766f34f 100644 --- a/internal/db/rdb/mysql/dbr/tx.go +++ b/internal/db/rdb/mysql/dbr/tx.go @@ -43,7 +43,7 @@ func (t *tx) Rollback() error { return t.Tx.Rollback() } -// RollbackUnlessCommitted rollsback the transaction unless it has already been committed or rolled back. +// RollbackUnlessCommitted rollbacks the transaction unless it has already been committed or rolled back. 
func (t *tx) RollbackUnlessCommitted() { t.Tx.RollbackUnlessCommitted() } diff --git a/internal/db/rdb/mysql/mysql_test.go b/internal/db/rdb/mysql/mysql_test.go index c741df72cdd..4523b818afc 100644 --- a/internal/db/rdb/mysql/mysql_test.go +++ b/internal/db/rdb/mysql/mysql_test.go @@ -1818,7 +1818,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -1916,7 +1916,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2013,7 +2013,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2512,7 +2512,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2612,7 +2612,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2711,7 +2711,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -3022,7 +3022,7 @@ func Test_mySQLClient_DeleteVector(t *testing.T) { func() test { err := errors.ErrMySQLTransactionNotCreated return test{ - name: "return error when transacton is nil", + name: "return error when transaction is nil", args: args{ ctx: context.Background(), uuid: "vald-01", @@ -3217,9 +3217,9 @@ func Test_mySQLClient_DeleteVector(t *testing.T) { } }(), func() test { - err := errors.New("podIPTableNmae error") + err := errors.New("podIPTableName error") return test{ - name: "return error when DeleteFromFunc(podIPTableNmae) returns error", + name: "return error when DeleteFromFunc(podIPTableName) returns error", args: args{ ctx: context.Background(), uuid: "vald-01", @@ -3813,7 +3813,7 @@ func Test_mySQLClient_SetIPs(t *testing.T) { RollbackUnlessCommittedFunc: func() {}, InsertIntoFunc: func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -3885,7 +3885,7 @@ func Test_mySQLClient_SetIPs(t *testing.T) { RollbackUnlessCommittedFunc: func() {}, 
InsertIntoFunc: func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { diff --git a/internal/db/rdb/mysql/option.go b/internal/db/rdb/mysql/option.go index 7eac41b6d55..4a4c8b31947 100644 --- a/internal/db/rdb/mysql/option.go +++ b/internal/db/rdb/mysql/option.go @@ -182,7 +182,7 @@ func WithConnectionLifeTimeLimit(dur string) Option { } // WithMaxIdleConns returns the option to set the maxIdleConns. -// If conns is negative numner, no idle connections are retained. +// If conns is negative number, no idle connections are retained. // ref: https://golang.org/src/database/sql/sql.go?s=24983:25019#L879 func WithMaxIdleConns(conns int) Option { return func(m *mySQLClient) error { @@ -194,7 +194,7 @@ func WithMaxIdleConns(conns int) Option { } // WithMaxOpenConns returns the option to set the maxOpenConns. -// If conns is negative numner, no limit on the number of open connections. +// If conns is negative number, no limit on the number of open connections. // ref: https://golang.org/src/database/sql/sql.go?s=24983:25019#L923 func WithMaxOpenConns(conns int) Option { return func(m *mySQLClient) error { diff --git a/internal/db/storage/blob/cloudstorage/option.go b/internal/db/storage/blob/cloudstorage/option.go index 28ccb2a0568..89561867e70 100644 --- a/internal/db/storage/blob/cloudstorage/option.go +++ b/internal/db/storage/blob/cloudstorage/option.go @@ -39,7 +39,7 @@ func WithURL(str string) Option { } } -// WithURLOpener returns Option that sets c.urlOpner. +// WithURLOpener returns Option that sets c.urlOpener. func WithURLOpener(uo *gcsblob.URLOpener) Option { return func(c *client) error { if uo != nil { diff --git a/internal/db/storage/blob/s3/reader/option.go b/internal/db/storage/blob/s3/reader/option.go index d71350d2276..8a71e59fa2d 100644 --- a/internal/db/storage/blob/s3/reader/option.go +++ b/internal/db/storage/blob/s3/reader/option.go @@ -60,7 +60,7 @@ func WithBucket(bucket string) Option { } } -// WithMaxChunkSize retunrs the option to set the maxChunkSize. +// WithMaxChunkSize returns the option to set the maxChunkSize. 
func WithMaxChunkSize(size int64) Option {
return func(r *reader) {
r.maxChunkSize = size
diff --git a/internal/db/storage/blob/s3/s3_test.go b/internal/db/storage/blob/s3/s3_test.go
index 3b4532c69bb..04e60234728 100644
--- a/internal/db/storage/blob/s3/s3_test.go
+++ b/internal/db/storage/blob/s3/s3_test.go
@@ -389,7 +389,7 @@ func Test_client_Close(t *testing.T) {
}
tests := []test{
{
- name: "retursn nil",
+ name: "returns nil",
want: want{
err: nil,
},
diff --git a/internal/db/storage/blob/s3/session/session_test.go b/internal/db/storage/blob/s3/session/session_test.go
index ba4eb8c8f87..50389e4f04a 100644
--- a/internal/db/storage/blob/s3/session/session_test.go
+++ b/internal/db/storage/blob/s3/session/session_test.go
@@ -503,7 +503,7 @@ func Test_sess_Session(t *testing.T) {
},
},
{
- name: "set EnableParamValdiation success",
+ name: "set EnableParamValidation success",
fields: fields{
enableParamValidation: true,
},
@@ -532,7 +532,7 @@ func Test_sess_Session(t *testing.T) {
},
},
{
- name: "set Enable100Conitnue success",
+ name: "set Enable100Continue success",
fields: fields{
enable100Continue: true,
},
diff --git a/internal/errors/agent.go b/internal/errors/agent.go
index da16986adea..a25d13d043a 100644
--- a/internal/errors/agent.go
+++ b/internal/errors/agent.go
@@ -115,4 +115,9 @@ var (
// ErrWriteOperationToReadReplica represents an error that when a write operation is made to read replica.
ErrWriteOperationToReadReplica = New("write operation to read replica is not possible")
+
+ // ErrInvalidTimestamp represents a function to generate an error that the timestamp is invalid.
+ ErrInvalidTimestamp = func(ts int64) error {
+ return Errorf("invalid timestamp detected: %d", ts)
+ }
)
diff --git a/internal/errors/corrector.go b/internal/errors/corrector.go
index 5fbc08f44b1..89f3434842c 100644
--- a/internal/errors/corrector.go
+++ b/internal/errors/corrector.go
@@ -37,7 +37,7 @@ var ErrFailedToReceiveVectorFromStream = New("failed to receive vector from stre
// ErrFailedToCheckConsistency represents an error that failed to check consistency process while index correction process.
var ErrFailedToCheckConsistency = func(err error) error {
- return Wrap(err, "failed to check consistency while index correctioin process")
+ return Wrap(err, "failed to check consistency while index correction process")
}
// ErrStreamListObjectStreamFinishedUnexpectedly represents an error that StreamListObject finished not because of io.EOF.
diff --git a/internal/errors/grpc.go b/internal/errors/grpc.go
index 4df32439123..9a39aae5db0 100644
--- a/internal/errors/grpc.go
+++ b/internal/errors/grpc.go
@@ -63,7 +63,7 @@ var (
// ErrGRPCUnexpectedStatusError represents an error that the gRPC status code is undefined.
ErrGRPCUnexpectedStatusError = func(code string, err error) error {
- return Wrapf(err, "unexcepted error detected: code %s", code)
+ return Wrapf(err, "unexpected error detected: code %s", code)
}
// ErrInvalidProtoMessageType represents an error that the gRPC protocol buffers message type is invalid.
diff --git a/internal/errors/net.go b/internal/errors/net.go
index d21de4845af..1a64fa9cf6f 100644
--- a/internal/errors/net.go
+++ b/internal/errors/net.go
@@ -37,7 +37,7 @@ var (
return Errorf("no port available for Host: %s\tbetween %d ~ %d", host, start, end)
}
- // ErrLookupIPAddrNotFound represents a function to generate an error that the host's ip address could not discovererd from DNS.
+ // ErrLookupIPAddrNotFound represents a function to generate an error that the host's ip address could not discovered from DNS. ErrLookupIPAddrNotFound = func(host string) error { return Errorf("failed to lookup ip addrs for host: %s", host) } diff --git a/internal/errors/option_test.go b/internal/errors/option_test.go index 8fba808ddee..f5c904bbdca 100644 --- a/internal/errors/option_test.go +++ b/internal/errors/option_test.go @@ -50,7 +50,7 @@ func TestNewErrInvalidOption(t *testing.T) { name := "WithPort" val := 9000 return test{ - name: "return ErrInvalidOpton when name and val have a value and errs is empty.", + name: "return ErrInvalidOption when name and val have a value and errs is empty.", args: args{ name: name, val: val, @@ -70,7 +70,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := errs[0] return test{ - name: "return ErrInvalidOpton when all of parameter has value.", + name: "return ErrInvalidOption when all of parameter has value.", args: args{ name: name, val: val, @@ -93,7 +93,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := errs[1] return test{ - name: "return ErrInvalidOpton when all of parameter has value and errs has nil as value.", + name: "return ErrInvalidOption when all of parameter has value and errs has nil as value.", args: args{ name: name, val: val, @@ -115,7 +115,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := Wrap(errs[1], errs[0].Error()) return test{ - name: "return ErrInvalidOpton when name is nil and val and errs have values.", + name: "return ErrInvalidOption when name is nil and val and errs have values.", args: args{ val: val, errs: errs, @@ -136,7 +136,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := Wrap(errs[1], errs[0].Error()) return test{ - name: "return ErrInvalidOpton when val is nil and name and errs have values.", + name: "return ErrInvalidOption when val is nil and name and errs have values.", args: args{ name: name, errs: errs, diff --git a/internal/errors/redis.go b/internal/errors/redis.go index c27b47af563..99fd5421327 100644 --- a/internal/errors/redis.go +++ b/internal/errors/redis.go @@ -21,7 +21,7 @@ var ( // ErrRedisInvalidKVVKPrefix represents a function to generate an error that kv index and vk prefix are invalid. ErrRedisInvalidKVVKPrefix = func(kv, vk string) error { - return Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", kv, vk) + return Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", kv, vk) } // ErrRedisNotFoundIdentity generates an RedisNotFoundIdentityError error. 
diff --git a/internal/errors/redis_test.go b/internal/errors/redis_test.go index c399adf6e7c..8355f9fba3a 100644 --- a/internal/errors/redis_test.go +++ b/internal/errors/redis_test.go @@ -24,7 +24,7 @@ import ( "github.com/vdaas/vald/internal/test/goleak" ) -func TestErrRedisInvalidKVVKPrefic(t *testing.T) { +func TestErrRedisInvalidKVVKPrefix(t *testing.T) { type fields struct { kv string vk string @@ -56,7 +56,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { vk: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", str, str), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", str, str), }, } }(), @@ -67,7 +67,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { kv: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", str, ""), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", str, ""), }, } }(), @@ -78,7 +78,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { vk: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", "", str), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", "", str), }, } }(), @@ -87,7 +87,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { name: "return an ErrRedisInvalidKVVKPrefix error when kv and vk are empty", fields: fields{}, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", "", ""), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", "", ""), }, } }(), @@ -167,7 +167,7 @@ func TestErrRedisNotFoundIdentity(t *testing.T) { } } -func TestErrRdisNotFound(t *testing.T) { +func TestErrRedisNotFound(t *testing.T) { type fields struct { key string } @@ -304,7 +304,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -316,7 +316,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: "return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -327,7 +327,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -338,7 +338,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to fetch key ()"), @@ -396,7 +396,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -408,7 +408,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: "return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -419,7 +419,7 
@@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -430,7 +430,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to set key ()"), @@ -488,7 +488,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -500,7 +500,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: "return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -511,7 +511,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -522,7 +522,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to delete key ()"), diff --git a/internal/errors/tls.go b/internal/errors/tls.go index 00357926c90..e714347ddf2 100644 --- a/internal/errors/tls.go +++ b/internal/errors/tls.go @@ -20,10 +20,10 @@ package errors var ( // TLS. - // ErrTLSDisabled is error variable, it's replesents config error that tls is disabled by config. + // ErrTLSDisabled is error variable, it's represents config error that tls is disabled by config. ErrTLSDisabled = New("tls feature is disabled") - // ErrTLSCertOrKeyNotFound is error variable, it's replesents tls cert or key not found error. + // ErrTLSCertOrKeyNotFound is error variable, it's represents tls cert or key not found error. ErrTLSCertOrKeyNotFound = New("cert or key file path not found") ErrCertificationFailed = New("certification failed") diff --git a/internal/errors/vald.go b/internal/errors/vald.go index 949489a0b71..7ba363fcdea 100644 --- a/internal/errors/vald.go +++ b/internal/errors/vald.go @@ -25,7 +25,7 @@ var ( // ErrSameVectorAlreadyExists represents an error that vald already has same features vector data. ErrSameVectorAlreadyExists = func(meta string, n, o []float32) error { - return Errorf("vald metadata:\t%s\talready exists reqested: %v, stored: %v", meta, n, o) + return Errorf("vald metadata:\t%s\talready exists requested: %v, stored: %v", meta, n, o) } // ErrMetaDataCannotFetch represents an error that vald metadata cannot fetch. diff --git a/internal/info/info.go b/internal/info/info.go index 6f5f31e0f93..223a507b07b 100644 --- a/internal/info/info.go +++ b/internal/info/info.go @@ -280,7 +280,7 @@ func (d Detail) String() string { return "\n" + strings.Join(strs, "\n") } -// Get returns parased Detail object. +// Get returns parsed Detail object. 
func (i *info) Get() Detail { i.prepare() return i.getDetail() diff --git a/internal/log/option_test.go b/internal/log/option_test.go index 62bf9511fd6..64a9ebecf3c 100644 --- a/internal/log/option_test.go +++ b/internal/log/option_test.go @@ -148,7 +148,7 @@ func TestWithLoggerType(t *testing.T) { { name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), @@ -227,7 +227,7 @@ func TestWithLevel(t *testing.T) { { name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), @@ -312,7 +312,7 @@ func TestWithFormat(t *testing.T) { return test{ name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), diff --git a/internal/net/dialer_test.go b/internal/net/dialer_test.go index 3e366b1f226..22183ede9e8 100644 --- a/internal/net/dialer_test.go +++ b/internal/net/dialer_test.go @@ -1163,7 +1163,7 @@ func Test_dialer_cachedDialer(t *testing.T) { // check the connection made on the same port _, p, _ := net.SplitHostPort(gotConn.RemoteAddr().String()) if p != strconv.Itoa(int(port)) { - return errors.Errorf("unexcepted port number, except: %d, got: %s", port, p) + return errors.Errorf("unexpected port number, except: %d, got: %s", port, p) } // read the output from the server and check if it is equals to the count diff --git a/internal/net/grpc/interceptor/client/metric/metric.go b/internal/net/grpc/interceptor/client/metric/metric.go index 7bcec5833f5..2530fd06c8d 100644 --- a/internal/net/grpc/interceptor/client/metric/metric.go +++ b/internal/net/grpc/interceptor/client/metric/metric.go @@ -40,7 +40,7 @@ const ( func ClientMetricInterceptors() (grpc.UnaryClientInterceptor, grpc.StreamClientInterceptor, error) { meter := metrics.GetMeter() - latencyHistgram, err := meter.Float64Histogram( + latencyHistogram, err := meter.Float64Histogram( latencyMetricsName, metrics.WithDescription("Client latency in milliseconds, by method"), metrics.WithUnit(metrics.Milliseconds), @@ -60,7 +60,7 @@ func ClientMetricInterceptors() (grpc.UnaryClientInterceptor, grpc.StreamClientI record := func(ctx context.Context, method string, err error, latency float64) { attrs := attributesFromError(method, err) - latencyHistgram.Record(ctx, latency, metrics.WithAttributes(attrs...)) + latencyHistogram.Record(ctx, latency, metrics.WithAttributes(attrs...)) completedRPCCnt.Add(ctx, 1, metrics.WithAttributes(attrs...)) } return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { diff --git a/internal/net/grpc/interceptor/server/metric/metric.go b/internal/net/grpc/interceptor/server/metric/metric.go index 574386d4834..d0bba7e075e 100644 --- a/internal/net/grpc/interceptor/server/metric/metric.go +++ b/internal/net/grpc/interceptor/server/metric/metric.go @@ -36,7 +36,7 @@ const ( func MetricInterceptors() (grpc.UnaryServerInterceptor, grpc.StreamServerInterceptor, error) { meter := metrics.GetMeter() - latencyHistgram, err := meter.Float64Histogram( + latencyHistogram, err := meter.Float64Histogram( latencyMetricsName, metrics.WithDescription("Server latency in milliseconds, by method"), metrics.WithUnit(metrics.Milliseconds), @@ -56,7 +56,7 @@ func MetricInterceptors() (grpc.UnaryServerInterceptor, grpc.StreamServerInterce record := func(ctx context.Context, method string, err error, latency float64) { attrs := attributesFromError(method, err) - 
latencyHistgram.Record(ctx, latency, metrics.WithAttributes(attrs...)) + latencyHistogram.Record(ctx, latency, metrics.WithAttributes(attrs...)) completedRPCCnt.Add(ctx, 1, metrics.WithAttributes(attrs...)) } return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { diff --git a/internal/net/http/json/json_test.go b/internal/net/http/json/json_test.go index a0f3dbef24d..d12189dcf9d 100644 --- a/internal/net/http/json/json_test.go +++ b/internal/net/http/json/json_test.go @@ -377,7 +377,7 @@ func TestErrorHandler(t *testing.T) { } if got, want := w.Code, http.StatusInternalServerError; got != want { - return errors.Errorf("reso code not equals. want: %v, got: %v", http.StatusInternalServerError, got) + return errors.Errorf("response code not equals. want: %v, got: %v", http.StatusInternalServerError, got) } return nil }, @@ -666,7 +666,7 @@ func TestRequest(t *testing.T) { ctx context.Context method string url string - payloyd any + payload any data any } type want struct { @@ -694,7 +694,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "@", url: "/", - payloyd: nil, + payload: nil, data: nil, }, want: want{ @@ -709,7 +709,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: "/", - payloyd: 1 + 3i, + payload: 1 + 3i, data: new(any), }, checkFunc: func(w want, err error) error { @@ -730,7 +730,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: "/", - payloyd: "1", + payload: "1", data: new(any), }, want: want{ @@ -753,7 +753,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: srv.URL, - payloyd: "1", + payload: "1", data: &got, }, want: want{ @@ -791,7 +791,7 @@ func TestRequest(t *testing.T) { checkFunc = defaultCheckFunc } - err := Request(test.args.ctx, test.args.method, test.args.url, test.args.payloyd, test.args.data) + err := Request(test.args.ctx, test.args.method, test.args.url, test.args.payload, test.args.data) if err := checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } diff --git a/internal/tls/tls.go b/internal/tls/tls.go index 64811db689d..7b8869a2f4b 100644 --- a/internal/tls/tls.go +++ b/internal/tls/tls.go @@ -49,7 +49,7 @@ var ( // NewTLSConfig returns a *tls.Config struct or error // This function read TLS configuration and initialize *tls.Config struct. // This function initialize TLS configuration, for example the CA certificate and key to start TLS server. -// Server and CA Certificate, and private key will read from a file from the file path definied in environment variable. +// Server and CA Certificate, and private key will read from a file from the file path defined in environment variable. func New(opts ...Option) (*Config, error) { c, err := newCredential(opts...) if err != nil { diff --git a/internal/worker/queue.go b/internal/worker/queue.go index 0ec5a7c68c9..3abf9d89c08 100644 --- a/internal/worker/queue.go +++ b/internal/worker/queue.go @@ -64,8 +64,8 @@ func NewQueue(opts ...QueueOption) (Queue, error) { return q, nil } -// Start starts execute queueing if queue is not runnnig. -// If queue is already reunning, it returns error. +// Start starts execute queueing if queue is not running. +// If queue is already running, it returns error. // It returns the error channel that the queueing job return. 
func (q *queue) Start(ctx context.Context) (<-chan error, error) { if q.isRunning() { @@ -132,7 +132,7 @@ func (q *queue) Push(ctx context.Context, job JobFunc) error { } } -// Pop returns (JobFunc, nil) if the channnel, which will be used for queuing job, contains JobFunc. +// Pop returns (JobFunc, nil) if the channel, which will be used for queuing job, contains JobFunc. // It returns (nil ,error) if it failed to pop from the job queue. func (q *queue) Pop(ctx context.Context) (JobFunc, error) { tryCnt := int(q.Len()) + 1 // include the first try diff --git a/internal/worker/queue_option.go b/internal/worker/queue_option.go index 7b432e747a8..a2be4b4c142 100644 --- a/internal/worker/queue_option.go +++ b/internal/worker/queue_option.go @@ -52,7 +52,7 @@ func WithQueueErrGroup(eg errgroup.Group) QueueOption { } // WithQueueCheckDuration returns the option to set the qcdur for queue. -// If dur is invalid string, it returns errror. +// If dur is invalid string, it returns error. func WithQueueCheckDuration(dur string) QueueOption { return func(q *queue) error { if len(dur) == 0 { diff --git a/pkg/agent/core/faiss/service/faiss.go b/pkg/agent/core/faiss/service/faiss.go index a5b11e4f7c3..e46e0467d89 100644 --- a/pkg/agent/core/faiss/service/faiss.go +++ b/pkg/agent/core/faiss/service/faiss.go @@ -915,7 +915,7 @@ func (f *faiss) saveIndex(ctx context.Context) error { // no cleanup invalid index eg, ectx := errgroup.New(ctx) - // we want to ensure the acutal kvs size between kvsdb and metadata, + // we want to ensure the actual kvs size between kvsdb and metadata, // so we create this counter to count the actual kvs size instead of using kvs.Len() var ( kvsLen uint64 diff --git a/pkg/agent/core/ngt/handler/grpc/object_test.go b/pkg/agent/core/ngt/handler/grpc/object_test.go index 39b5521bbb7..6ab2f52a9d8 100644 --- a/pkg/agent/core/ngt/handler/grpc/object_test.go +++ b/pkg/agent/core/ngt/handler/grpc/object_test.go @@ -1300,7 +1300,7 @@ func Test_server_StreamListObject(t *testing.T) { // Call the method under test err = s.StreamListObject(&payload.Object_List_Request{}, &stream) - // Check the errros are joined and its a gRPC error + // Check the errors are joined and its a gRPC error require.ErrorContains(t, err, "foo") require.ErrorContains(t, err, "bar") _, ok := status.FromError(err) diff --git a/pkg/agent/core/ngt/service/ngt.go b/pkg/agent/core/ngt/service/ngt.go index ba0c11319ad..c5bb4de1d51 100644 --- a/pkg/agent/core/ngt/service/ngt.go +++ b/pkg/agent/core/ngt/service/ngt.go @@ -530,7 +530,7 @@ func (n *ngt) load(ctx context.Context, path string, opts ...core.Option) (err e // backupBroken backup index at originPath into brokenDir. // The name of the directory will be timestamp(UnixNano). -// If it exeeds the limit, backupBroken removes the oldest backup directory. +// If it exceeds the limit, backupBroken removes the oldest backup directory. func (n *ngt) backupBroken(ctx context.Context) error { if n.historyLimit <= 0 { return nil @@ -1343,7 +1343,7 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { } log.Infof("create index operation started, uncommitted indexes = %d", ic) log.Debug("create index delete phase started") - // vqProcessedCnt is a tempral counter to store the number of processed vqueue items. + // vqProcessedCnt is a temporary counter to store the number of processed vqueue items. // This will be added to nopvq after CreateIndex operation succeeds. 
var vqProcessedCnt uint64 n.vq.RangePopDelete(ctx, now, func(uuid string) bool { @@ -1569,7 +1569,7 @@ func (n *ngt) saveIndex(ctx context.Context) (err error) { beforeNopvq := n.nopvq.Load() defer n.gc() - // since defering here, atomic operations are guaranteed in this scope + // since deferring here, atomic operations are guaranteed in this scope defer n.saving.Store(false) log.Debug("cleanup invalid index started") @@ -1577,7 +1577,7 @@ func (n *ngt) saveIndex(ctx context.Context) (err error) { log.Debug("cleanup invalid index finished") eg, ectx := errgroup.New(ctx) - // we want to ensure the acutal kvs size between kvsdb and metadata, + // we want to ensure the actual kvs size between kvsdb and metadata, // so we create this counter to count the actual kvs size instead of using kvs.Len() var ( kvsLen uint64 diff --git a/pkg/agent/core/ngt/service/ngt_test.go b/pkg/agent/core/ngt/service/ngt_test.go index 71eb5d3b2e8..4cc18bbebe5 100644 --- a/pkg/agent/core/ngt/service/ngt_test.go +++ b/pkg/agent/core/ngt/service/ngt_test.go @@ -650,7 +650,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns false when it's an initaial state", + name: "returns false when it's an initial state", args: args{ path: tmpDir, }, @@ -707,7 +707,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns true when mets.IsInvalid is true", + name: "returns true when meta.IsInvalid is true", args: args{ path: tmpDir, }, @@ -738,7 +738,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns true when mets.IsInvalid is true", + name: "returns true when meta.IsInvalid is true", args: args{ path: tmpDir, }, @@ -1370,7 +1370,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { return } type args struct { - idxes []index + indices []index poolSize uint32 bulkSize int } @@ -1444,7 +1444,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { return test{ name: fmt.Sprintf("insert & upsert %d random and 11 digits added to each vector element", count), args: args{ - idxes: createRandomData(count, &createRandomDataConfig{ + indices: createRandomData(count, &createRandomDataConfig{ additionaldigits: 11, }), poolSize: uint32(count / 10), @@ -1490,7 +1490,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { if err != nil { tt.Errorf("failed to init ngt service, error = %v", err) } - for _, idx := range test.args.idxes { + for _, idx := range test.args.indices { err = n.Insert(idx.uuid, idx.vec) if err := checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) @@ -1508,7 +1508,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { idx := i eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-1", idx) - for _, idx := range test.args.idxes[:len(test.args.idxes)/3] { + for _, idx := range test.args.indices[:len(test.args.indices)/3] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1518,7 +1518,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-2", idx) - for _, idx := range test.args.idxes[len(test.args.idxes)/3 : 2*len(test.args.idxes)/3] { + for _, idx := range test.args.indices[len(test.args.indices)/3 : 2*len(test.args.indices)/3] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1528,7 +1528,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { 
eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-3", idx) - for _, idx := range test.args.idxes[2*len(test.args.idxes)/3:] { + for _, idx := range test.args.indices[2*len(test.args.indices)/3:] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1569,9 +1569,9 @@ func Test_ngt_E2E(t *testing.T) { beforeFunc func(args) afterFunc func(args) } - multiUpsertRequestGenFunc := func(idxes []index, chunk int) (res []*payload.Upsert_MultiRequest) { + multiUpsertRequestGenFunc := func(indices []index, chunk int) (res []*payload.Upsert_MultiRequest) { reqs := make([]*payload.Upsert_Request, 0, chunk) - for i := 0; i < len(idxes); i++ { + for i := 0; i < len(indices); i++ { if len(reqs) == chunk-1 { res = append(res, &payload.Upsert_MultiRequest{ Requests: reqs, @@ -1580,8 +1580,8 @@ } else { reqs = append(reqs, &payload.Upsert_Request{ Vector: &payload.Object_Vector{ - Id: idxes[i].uuid, - Vector: idxes[i].vec, + Id: indices[i].uuid, + Vector: indices[i].vec, }, Config: &payload.Upsert_Config{ SkipStrictExistCheck: true, diff --git a/pkg/agent/internal/kvs/kvs_test.go b/pkg/agent/internal/kvs/kvs_test.go index c9dbd9db149..81fe8953ca8 100644 --- a/pkg/agent/internal/kvs/kvs_test.go +++ b/pkg/agent/internal/kvs/kvs_test.go @@ -229,7 +229,7 @@ func Test_bidi_Get(t *testing.T) { ) return test{ - name: "return the value when there is a value for the key and l of fields is maximun value of uint64", + name: "return the value when there is a value for the key and l of fields is maximum value of uint64", args: args{ key: key, }, @@ -496,7 +496,7 @@ func Test_bidi_GetInverse(t *testing.T) { ) return test{ - name: "return key and timestamp and true when there is a key for the value and l of fields is maximun value of uint64", + name: "return key and timestamp and true when there is a key for the value and l of fields is maximum value of uint64", args: args{ val: val, }, @@ -770,7 +770,7 @@ func Test_bidi_Set(t *testing.T) { ) return test{ - name: "set success when the key is not empty string and val is not 0 and l of fields is maximun value of uint64", + name: "set success when the key is not empty string and val is not 0 and l of fields is maximum value of uint64", args: args{ key: key, val: val, @@ -806,7 +806,7 @@ func Test_bidi_Set(t *testing.T) { ) return test{ - name: "set success when the key is already set and the same key is set twie", + name: "set success when the key is already set and the same key is set twice", args: args{ key: key, val: val, @@ -1071,7 +1071,7 @@ func Test_bidi_Delete(t *testing.T) { ) return test{ - name: "return val and true when the delete successes and l of fields is maximun value of uint64", + name: "return val and true when the delete successes and l of fields is maximum value of uint64", args: args{ key: key, }, @@ -1353,7 +1353,7 @@ func Test_bidi_DeleteInverse(t *testing.T) { ) return test{ - name: "return key and true when the delete successes and l of fields is maximun value of uint64", + name: "return key and true when the delete successes and l of fields is maximum value of uint64", args: args{ val: val, }, @@ -1644,7 +1644,7 @@ func Test_bidi_Range(t *testing.T) { var mu sync.Mutex return test{ - name: "rage get successes when l of fields is maximun value of uint64", + name: "rage get successes when l of fields is maximum value of uint64", args: args{ f: func(s string, u uint32, t int64) bool { mu.Lock() @@ -1743,7 +1743,7 @@ func Test_bidi_Len(t *testing.T) { }, }, { - name: "return maximun value when l of
field is maximun value of uint64", + name: "return maximum value when l of field is maximum value of uint64", fields: fields{ l: math.MaxUint64, }, diff --git a/pkg/gateway/lb/handler/grpc/handler.go b/pkg/gateway/lb/handler/grpc/handler.go index acfdfad8f31..555c3d739bf 100644 --- a/pkg/gateway/lb/handler/grpc/handler.go +++ b/pkg/gateway/lb/handler/grpc/handler.go @@ -361,55 +361,17 @@ func (s *server) SearchByID( } return nil, err } - vec, err := s.getObject(ctx, uuid) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) if err != nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - if st == nil || st.Code() == codes.NotFound { - err = nil - } - } - if err != nil { - if span != nil { - span.RecordError(err) - span.SetAttributes(attrs...) - span.SetStatus(trace.StatusError, err.Error()) - } - return nil, err + st, msg, err := status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil && st.Code() != codes.NotFound { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) } // try search by using agent's SearchByID method this operation is emergency fallback, the search quality is not same as usual SearchByID operation. 
res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { @@ -851,55 +813,17 @@ func (s *server) LinearSearchByID( } return nil, err } - vec, err := s.getObject(ctx, uuid) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) if err != nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - if st == nil || st.Code() == codes.NotFound { - err = nil - } - } - if err != nil { - if span != nil { - span.RecordError(err) - span.SetAttributes(attrs...) - span.SetStatus(trace.StatusError, err.Error()) - } - return nil, err + st, msg, err := status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil && st.Code() != codes.NotFound { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) } // try search by using agent's LinearSearchByID method this operation is emergency fallback, the search quality is not same as usual LinearSearchByID operation. 
res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { @@ -1778,54 +1702,16 @@ func (s *server) Update( } if !req.GetConfig().GetSkipStrictExistCheck() { - vec, err := s.getObject(ctx, uuid) - if err != nil || vec == nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = status.WrapWithNotFound(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s object not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeNotFound(err.Error()) - default: - code := codes.Unknown - if err == nil { - err = errors.ErrObjectIDNotFound(uuid) - code = codes.NotFound - } - st, msg, err = status.ParseError(err, code, vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - } - if span != nil { + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + if err != nil { + st, msg, err := status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil { span.RecordError(err) - span.SetAttributes(attrs...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -2121,6 +2007,221 @@ func (s *server) MultiUpdate( return locs, errs } +func (s *server) UpdateTimestamp( + ctx context.Context, req *payload.Object_Timestamp, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + uuid := req.GetId() + reqInfo := &errdetails.RequestInfo{ + RequestId: uuid, + ServingData: errdetails.Serialize(req), + } + resInfo := &errdetails.ResourceInfo{ + ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateTimestampRPCName + "." 
+ vald.GetObjectRPCName, + ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), + } + if len(uuid) == 0 { + err = errors.ErrInvalidMetaDataConfig + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid uuid", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "invalid id", + Description: err.Error(), + }, + }, + }) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + ts := req.GetTimestamp() + if ts < 0 { + err = errors.ErrInvalidTimestamp(ts) + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid vector argument", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "timestamp", + Description: err.Error(), + }, + }, + }, info.Get()) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + var ( + mu sync.RWMutex + aeCount atomic.Uint64 + updated atomic.Uint64 + ls = make([]string, 0, s.replica) + visited = make(map[string]bool, s.replica) + locs = &payload.Object_Location{ + Uuid: uuid, + Ips: make([]string, 0, s.replica), + } + ) + err = s.gateway.BroadCast(ctx, service.WRITE, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "BroadCast/"+target), apiName+"/"+vald.UpdateRPCName+"/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.UpdateTimestamp(ctx, req, copts...) + if err != nil { + st, ok := status.FromError(err) + if ok && st != nil { + if st.Code() != codes.AlreadyExists && + st.Code() != codes.Canceled && + st.Code() != codes.DeadlineExceeded && + st.Code() != codes.InvalidArgument && + st.Code() != codes.NotFound && + st.Code() != codes.OK && + st.Code() != codes.Unimplemented { + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), fmt.Sprintf("UpdateTimestamp operation for Agent %s failed,\terror: %v", target, err))...) + span.SetStatus(trace.StatusError, err.Error()) + } + return err + } + if st.Code() == codes.AlreadyExists { + host, _, err := net.SplitHostPort(target) + if err != nil { + host = target + } + aeCount.Add(1) + mu.Lock() + visited[target] = true + locs.Ips = append(locs.GetIps(), host) + ls = append(ls, host) + mu.Unlock() + + } + } + return nil + } + if loc != nil { + updated.Add(1) + mu.Lock() + visited[target] = true + locs.Ips = append(locs.GetIps(), loc.GetIps()...) + ls = append(ls, loc.GetName()) + mu.Unlock() + } + return nil + }) + switch { + case err != nil: + st, msg, err := status.ParseError(err, codes.Internal, + "failed to parse "+vald.UpdateRPCName+" gRPC error response", reqInfo, resInfo, info.Get()) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + case len(locs.Ips) <= 0: + err = errors.ErrIndexNotFound + err = status.WrapWithNotFound(vald.UpdateRPCName+" API update target not found", err, reqInfo, resInfo) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeNotFound(err.Error())...) 
+ span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + case updated.Load()+aeCount.Load() < uint64(s.replica): + shortage := s.replica - int(updated.Load()+aeCount.Load()) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + if err != nil { + st, msg, err := status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpdateTimestampRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + + err = s.gateway.DoMulti(ctx, shortage, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + mu.RLock() + tf, ok := visited[target] + mu.RUnlock() + if tf && ok { + return errors.Errorf("target: %s already inserted will skip", target) + } + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "DoMulti/"+target), apiName+"/"+vald.InsertRPCName+"/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.Insert(ctx, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Timestamp: ts, + }, + }, copts...) + if err != nil { + st, ok := status.FromError(err) + if ok && st != nil && span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), fmt.Sprintf("Shortage index Insert for Update operation for Agent %s failed,\terror: %v", target, err))...) + span.SetStatus(trace.StatusError, err.Error()) + } + return err + } + if loc != nil { + updated.Add(1) + mu.Lock() + locs.Ips = append(locs.GetIps(), loc.GetIps()...) + ls = append(ls, loc.GetName()) + mu.Unlock() + } + return nil + }) + if err != nil { + st, msg, err := status.ParseError(err, codes.Unknown, vald.InsertRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + case updated.Load() == 0 && aeCount.Load() > 0: + err = status.WrapWithAlreadyExists(vald.UpdateRPCName+" API update target same vector already exists", errors.ErrSameVectorAlreadyExists(uuid, nil, nil), reqInfo, resInfo) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeAlreadyExists(err.Error())...) 
+ span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + + } + slices.Sort(ls) + locs.Name = strings.Join(ls, ",") + return locs, nil +} + func (s *server) Upsert( ctx context.Context, req *payload.Upsert_Request, ) (loc *payload.Object_Location, err error) { @@ -2180,48 +2281,23 @@ func (s *server) Upsert( } var shouldInsert bool if !req.GetConfig().GetSkipStrictExistCheck() { - vec, err := s.getObject(ctx, uuid) - var ( - attrs trace.Attributes - st *status.Status - msg string - ) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + var attrs trace.Attributes if err != nil || vec == nil { - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - shouldInsert = true - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + var ( + st *status.Status + msg string + ) + st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if st != nil { attrs = trace.FromGRPCStatus(st.Code(), msg) - if st != nil && st.Code() == codes.NotFound { - err = nil + if st.Code() == codes.NotFound { shouldInsert = true + err = nil } } } else if conv.F32stos(vec.GetVector()) == conv.F32stos(req.GetVector().GetVector()) { @@ -2236,7 +2312,6 @@ func (s *server) Upsert( } return nil, err } - } else { id, err := s.exists(ctx, uuid) if err != nil { diff --git a/pkg/gateway/lb/service/gateway.go b/pkg/gateway/lb/service/gateway.go index 53c259c9e15..2d806b20fcd 100644 --- a/pkg/gateway/lb/service/gateway.go +++ b/pkg/gateway/lb/service/gateway.go @@ -24,6 +24,7 @@ import ( "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/client/v1/client/discoverer" + vc "github.com/vdaas/vald/internal/client/v1/client/vald" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability/trace" @@ -94,7 +95,7 @@ func (g *gateway) BroadCast( case <-ictx.Done(): return nil default: - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) 
if err != nil { return err } @@ -129,7 +130,7 @@ func (g *gateway) DoMulti( copts ...grpc.CallOption, ) (err error) { if atomic.LoadUint32(&cur) < limit { - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) if err != nil { return err } @@ -147,7 +148,7 @@ func (g *gateway) DoMulti( if atomic.LoadUint32(&cur) < limit { _, ok := visited.Load(addr) if !ok { - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) if err != nil { return err } diff --git a/pkg/gateway/mirror/handler/grpc/handler.go b/pkg/gateway/mirror/handler/grpc/handler.go index 45226b4806c..7065b1e1774 100644 --- a/pkg/gateway/mirror/handler/grpc/handler.go +++ b/pkg/gateway/mirror/handler/grpc/handler.go @@ -1173,7 +1173,7 @@ func (s *server) handleInsert( } // In this case, the status code in the result object contains only OK or ALREADY_EXIST. - // And send Update API requst to ALREADY_EXIST cluster using the query requested by the user. + // And send Update API request to ALREADY_EXIST cluster using the query requested by the user. log.Warnf("failed to "+vald.InsertRPCName+" API: %#v", err) resLoc, err := s.handleInsertResult(ctx, alreadyExistsTgts, &payload.Update_Request{ @@ -1743,7 +1743,7 @@ func (s *server) handleUpdate( } // In this case, the status code in the result object contains only OK or ALREADY_EXIST or NOT_FOUND. - // And send Insert API requst to NOT_FOUND cluster using query requested by the user. + // And send Insert API request to NOT_FOUND cluster using query requested by the user. log.Warnf("failed to "+vald.UpdateRPCName+" API: %#v", err) resLoc, err := s.handleUpdateResult(ctx, notFoundTgts, &payload.Insert_Request{ @@ -2382,7 +2382,7 @@ func (s *server) doUpsert( return loc, nil } -// StreamUpsert handles bidirectional streaming for upserting objects. +// StreamUpsert handles bidirectional streaming for upsert objects. // It wraps the bidirectional stream logic for the Upsert RPC method. // For each incoming request in the bidirectional stream, it calls the Upsert function. // The response is then sent back through the stream with the corresponding status or location information. 
@@ -3348,7 +3348,7 @@ func (s *server) doStreamListObject( err = status.WrapWithCanceled("Stream Recv returned canceld error at "+id, err) attr = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded("Stream Recv returned deadlin exceeded error at "+id, err) + err = status.WrapWithDeadlineExceeded("Stream Recv returned deadline exceeded error at "+id, err) attr = trace.StatusCodeDeadlineExceeded(err.Error()) default: var ( @@ -3387,7 +3387,7 @@ func (s *server) doStreamListObject( err = status.WrapWithCanceled("Stream Send returned canceld error at "+id, err) attr = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded("Stream Send returned deadlin exceeded error at "+id, err) + err = status.WrapWithDeadlineExceeded("Stream Send returned deadline exceeded error at "+id, err) attr = trace.StatusCodeDeadlineExceeded(err.Error()) default: var ( diff --git a/pkg/gateway/mirror/service/mirror.go b/pkg/gateway/mirror/service/mirror.go index b5ab31609bc..5a2e68c9208 100644 --- a/pkg/gateway/mirror/service/mirror.go +++ b/pkg/gateway/mirror/service/mirror.go @@ -64,10 +64,10 @@ func NewMirrorClient(conn *grpc.ClientConn) MirrorClient { } type mirr struct { - addrl sync.Map[string, any] // List of all connected addresses + addrs sync.Map[string, any] // List of all connected addresses selfMirrTgts []*payload.Mirror_Target // Targets of self mirror gateway - selfMirrAddrl sync.Map[string, any] // List of self Mirror gateway addresses - gwAddrl sync.Map[string, any] // List of Vald gateway (LB gateway) addresses + selfMirrAddrs sync.Map[string, any] // List of self Mirror gateway addresses + gwAddrs sync.Map[string, any] // List of Vald gateway (LB gateway) addresses eg errgroup.Group registerDur time.Duration gateway Gateway @@ -90,7 +90,7 @@ func NewMirror(opts ...MirrorOption) (_ Mirror, err error) { } m.selfMirrTgts = make([]*payload.Mirror_Target, 0) - m.selfMirrAddrl.Range(func(addr string, _ any) bool { + m.selfMirrAddrs.Range(func(addr string, _ any) bool { var ( host string port uint16 @@ -317,15 +317,15 @@ func (m *mirr) Connect(ctx context.Context, targets ...*payload.Mirror_Target) e for _, target := range targets { addr := net.JoinHostPort(target.GetHost(), uint16(target.GetPort())) // addr: host:port if !m.isSelfMirrorAddr(addr) && !m.isGatewayAddr(addr) { - _, ok := m.addrl.Load(addr) + _, ok := m.addrs.Load(addr) if !ok || !m.IsConnected(ctx, addr) { _, err := m.gateway.GRPCClient().Connect(ctx, addr) if err != nil { - m.addrl.Delete(addr) + m.addrs.Delete(addr) return err } } - m.addrl.Store(addr, struct{}{}) + m.addrs.Store(addr, struct{}{}) } } return nil @@ -345,13 +345,13 @@ func (m *mirr) Disconnect(ctx context.Context, targets ...*payload.Mirror_Target for _, target := range targets { addr := net.JoinHostPort(target.GetHost(), uint16(target.GetPort())) if !m.isGatewayAddr(addr) { - _, ok := m.addrl.Load(addr) + _, ok := m.addrs.Load(addr) if ok || m.IsConnected(ctx, addr) { if err := m.gateway.GRPCClient().Disconnect(ctx, addr); err != nil && !errors.Is(err, errors.ErrGRPCClientConnNotFound(addr)) { return err } - m.addrl.Delete(addr) + m.addrs.Delete(addr) } } } @@ -366,7 +366,7 @@ func (m *mirr) IsConnected(ctx context.Context, addr string) bool { // MirrorTargets returns the Mirror targets, including the address of this gateway and the addresses of other Mirror gateways // to which this gateway is currently connected. 
func (m *mirr) MirrorTargets(ctx context.Context) (tgts []*payload.Mirror_Target, err error) { - tgts = make([]*payload.Mirror_Target, 0, m.addrl.Len()) + tgts = make([]*payload.Mirror_Target, 0, m.addrs.Len()) m.RangeMirrorAddr(func(addr string, _ any) bool { if m.IsConnected(ctx, addr) { var ( @@ -391,12 +391,12 @@ func (m *mirr) MirrorTargets(ctx context.Context) (tgts []*payload.Mirror_Target } func (m *mirr) isSelfMirrorAddr(addr string) bool { - _, ok := m.selfMirrAddrl.Load(addr) + _, ok := m.selfMirrAddrs.Load(addr) return ok } func (m *mirr) isGatewayAddr(addr string) bool { - _, ok := m.gwAddrl.Load(addr) + _, ok := m.gwAddrs.Load(addr) return ok } @@ -413,7 +413,7 @@ func (m *mirr) connectedOtherMirrorAddrs(ctx context.Context) (addrs []string) { // RangeMirrorAddr calls f sequentially for each key and value present in the connection map. If f returns false, range stops the iteration. func (m *mirr) RangeMirrorAddr(f func(addr string, _ any) bool) { - m.addrl.Range(func(addr string, value any) bool { + m.addrs.Range(func(addr string, value any) bool { if !m.isGatewayAddr(addr) && !m.isSelfMirrorAddr(addr) { if !f(addr, value) { return false diff --git a/pkg/gateway/mirror/service/mirror_option.go b/pkg/gateway/mirror/service/mirror_option.go index 1b7243c3829..ed605911eb1 100644 --- a/pkg/gateway/mirror/service/mirror_option.go +++ b/pkg/gateway/mirror/service/mirror_option.go @@ -44,7 +44,7 @@ func WithGatewayAddrs(addrs ...string) MirrorOption { return errors.NewErrCriticalOption("lbAddrs", addrs) } for _, addr := range addrs { - m.gwAddrl.Store(addr, struct{}{}) + m.gwAddrs.Store(addr, struct{}{}) } return nil } @@ -57,7 +57,7 @@ func WithSelfMirrorAddrs(addrs ...string) MirrorOption { return errors.NewErrCriticalOption("selfMirrorAddrs", addrs) } for _, addr := range addrs { - m.selfMirrAddrl.Store(addr, struct{}{}) + m.selfMirrAddrs.Store(addr, struct{}{}) } return nil } diff --git a/pkg/gateway/mirror/service/mirror_test.go b/pkg/gateway/mirror/service/mirror_test.go index d23f204fcd5..edf41c81918 100644 --- a/pkg/gateway/mirror/service/mirror_test.go +++ b/pkg/gateway/mirror/service/mirror_test.go @@ -734,10 +734,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx context.Context // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -769,10 +769,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -797,10 +797,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -834,10 +834,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: 
test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -857,10 +857,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts *payload.Mirror_Targets // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -897,10 +897,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -926,10 +926,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -963,10 +963,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -986,10 +986,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1022,10 +1022,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1051,10 +1051,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1088,10 +1088,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1110,10 +1110,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// 
selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1145,10 +1145,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1173,10 +1173,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1210,10 +1210,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1232,10 +1232,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1267,10 +1267,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1295,10 +1295,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1332,10 +1332,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1354,10 +1354,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // f func(addr string, _ any) bool // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1384,10 +1384,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // f:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1412,10 +1412,10 @@ func 
Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // f:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1449,10 +1449,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, diff --git a/pkg/gateway/mirror/usecase/vald.go b/pkg/gateway/mirror/usecase/vald.go index 453c7344c4d..12976fda0ee 100644 --- a/pkg/gateway/mirror/usecase/vald.go +++ b/pkg/gateway/mirror/usecase/vald.go @@ -23,9 +23,9 @@ import ( "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability" - bometrics "github.com/vdaas/vald/internal/observability/metrics/backoff" + backoffmetrics "github.com/vdaas/vald/internal/observability/metrics/backoff" cbmetrics "github.com/vdaas/vald/internal/observability/metrics/circuitbreaker" - mirrmetrics "github.com/vdaas/vald/internal/observability/metrics/gateway/mirror" + mirrormetrics "github.com/vdaas/vald/internal/observability/metrics/gateway/mirror" "github.com/vdaas/vald/internal/runner" "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/servers/server" @@ -135,9 +135,9 @@ func New(cfg *config.Data) (r runner.Runner, err error) { if cfg.Observability.Enabled { obs, err = observability.NewWithConfig( cfg.Observability, - bometrics.New(), + backoffmetrics.New(), cbmetrics.New(), - mirrmetrics.New(m), + mirrormetrics.New(m), ) if err != nil { return nil, err diff --git a/pkg/index/job/correction/service/corrector.go b/pkg/index/job/correction/service/corrector.go index 41a71b2274d..b293b8b92c7 100644 --- a/pkg/index/job/correction/service/corrector.go +++ b/pkg/index/job/correction/service/corrector.go @@ -87,7 +87,7 @@ func New(opts ...Option) (_ Corrector, err error) { log.Errorf("failed to create dir %s", dir) return nil, err } - path := file.Join(dir, "checkedid.db") + path := file.Join(dir, "checked_id.db") db, err := pogreb.New(pogreb.WithPath(path), pogreb.WithBackgroundCompactionInterval(c.backgroundCompactionInterval), pogreb.WithBackgroundSyncInterval(c.backgroundSyncInterval)) @@ -136,15 +136,22 @@ func (c *correct) Start(ctx context.Context) (err error) { return err } counts := detail.GetCounts() - agents := make([]string, 0, detail.GetLiveAgents()) - for agent, count := range counts { - log.Infof("index info: addr(%s), stored(%d), uncommitted(%d), indexing=%t, saving=%t", agent, count.GetStored(), count.GetUncommitted(), count.GetIndexing(), count.GetSaving()) + agents := make([]string, 0, len(counts)) + for agent := range counts { agents = append(agents, agent) } slices.SortFunc(agents, func(left, right string) int { - return cmp.Compare(counts[left].GetStored(), counts[right].GetStored()) + return cmp.Compare(counts[right].GetStored(), counts[left].GetStored()) }) + for _, agent := range agents { + count, ok := counts[agent] + if ok && count != nil { + log.Infof("index info: addr(%s), stored(%d), uncommitted(%d), indexing=%t, saving=%t", agent, count.GetStored(), count.GetUncommitted(), count.GetIndexing(), 
count.GetSaving()) + } + } + log.Infof("sorted agents: %v,\tdiscovered agents: %v", agents, c.discoverer.GetAddrs(ctx)) + errs := make([]error, 0, len(agents)) emptyReq := new(payload.Object_List_Request) @@ -177,24 +184,26 @@ func (c *correct) Start(ctx context.Context) (err error) { uncommitted uint32 indexing bool saving bool + debugMsg string ) count, ok := counts[addr] if ok && count != nil { stored = count.GetStored() uncommitted = count.GetUncommitted() + indexing = count.GetIndexing() + saving = count.GetSaving() + debugMsg = fmt.Sprintf("agent %s (total index detail = stored: %d, uncommitted: %d, indexing=%t, saving=%t), stream concurrency: %d, processing %d/%d, replicas: size(%d) = addrs%v", addr, stored, uncommitted, indexing, saving, c.streamListConcurrency, corrected, len(agents), len(replicas), replicas) if stored+uncommitted == 0 { // id no indices in agent skip process + log.Warnf("skipping index correction process due to zero index detected for %s", debugMsg) return nil } - indexing = count.GetIndexing() - saving = count.GetSaving() } - debugMsg := fmt.Sprintf("agent %s (stored: %d, uncommitted: %d, indexing=%t, saving=%t), stream concurrency: %d, processing %d/%d, replicas: size(%d) = addrs%v", addr, stored, uncommitted, indexing, saving, c.streamListConcurrency, corrected, len(agents), len(replicas), replicas) eg, egctx := errgroup.WithContext(ctx) eg.SetLimit(c.streamListConcurrency) ctx, cancel := context.WithCancelCause(egctx) - stream, err := vald.NewObjectClient(conn).StreamListObject(ctx, emptyReq, copts...) + stream, err := vc.NewValdClient(conn).StreamListObject(ctx, emptyReq, copts...) if err != nil { return err } @@ -225,27 +234,12 @@ func (c *correct) Start(ctx context.Context) (err error) { } else { cancel(errors.ErrStreamListObjectStreamFinishedUnexpectedly(err)) } - } else { + } else if res != nil && res.GetVector() != nil && res.GetVector().GetId() != "" && res.GetVector().GetTimestamp() < start.UnixNano() { eg.Go(safety.RecoverFunc(func() (err error) { vec := res.GetVector() - if vec == nil || vec.GetId() == "" { - st := res.GetStatus() - if st != nil { - log.Errorf("invalid vector id: %s detected and returned status code: %d, message: %s, details: %v, debug: %s", vec.GetId(), st.GetCode(), st.GetMessage(), st.GetDetails(), debugMsg) - } - return errors.ErrFailedToReceiveVectorFromStream - } - - // skip if the vector is inserted after correction start - if vec.GetTimestamp() > start.UnixNano() { - log.Debugf("index correction process for ID: %s skipped due to newer timestamp detected. job started at %s but object timestamp is %s", - vec.GetId(), - start.Format(time.RFC3339Nano), - time.Unix(0, vec.GetTimestamp()).Format(time.RFC3339Nano)) - return nil - } - + ts := vec.GetTimestamp() id := vec.GetId() + _, ok, err := c.checkedList.Get(id) if err != nil { log.Errorf("failed to perform Get from check list but still try to finish processing without cache: %v", err) @@ -264,161 +258,250 @@ func (c *correct) Start(ctx context.Context) (err error) { // Therefore, the process is only to correct the missing replicas. if len(replicas) <= 0 { diff := c.indexReplica - 1 - addrs := c.discoverer.GetAddrs(egctx) // correct index replica shortage if diff > 0 { - log.Infof("replica shortage(diff=%d) of vector id: %s detected from last %s. 
inserting to other agents = %v", diff, id, debugMsg, addrs) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToInsert - } - req := &payload.Insert_Request{ - Vector: vec, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - Timestamp: vec.GetTimestamp(), - }, - } - for _, daddr := range addrs { - if diff > 0 && daddr != addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - _, err := client.Insert(ctx, req, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < vec.GetTimestamp() { - _, err := client.Update(ctx, &payload.Update_Request{ - Vector: vec, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: vec.GetTimestamp() - 1, - }, - }, copts...) 
- if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to insert object to agent(%s): %w", daddr, err)) - } - } - } + return c.correctShortage(egctx, id, addr, debugMsg, vec, make(map[string]*payload.Object_Timestamp), diff) } return nil } - var ( - latest int64 - mu sync.Mutex - found = make(map[string]*payload.Object_Timestamp, len(addr)) - latestAgent = addr - ) // load index replica from other agents and store it to found map - if err := c.discoverer.GetClient().OrderedRangeConcurrent(egctx, replicas, len(replicas), - func(ctx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) error { - ots, err := vald.NewObjectClient(conn).GetTimestamp(ctx, &payload.Object_TimestampRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call GetTimestamp to agent: %s, id: %s returned not a gRPC status error: %v", addr, id, err) - return err - } else if st.Code() == codes.NotFound { - // when replica of agent > index replica, this happens - return nil - } else if st.Code() == codes.Canceled { - return nil - } else { - log.Errorf("failed to GetTimestamp with unexpected error. agent: %s, id: %s, code: %v, message: %s", addr, id, st.Code(), st.Message()) - return err - } + found, skipped, latest, latestAgent, err := c.loadReplicaInfo(egctx, addr, id, replicas, counts, ts, start) + if err != nil { + return err + } + if len(found) != 0 && ((len(replicas) > 0 && len(skipped) == 0) || (len(skipped) > 0 && len(skipped) < len(replicas))) { + // current object timestamp is not latest get latest object from other agent index replica + if ts < latest && latestAgent != addr { + latestObject := c.getLatestObject(egctx, id, addr, latestAgent, latest) + if latestObject != nil && latestObject.GetVector() != nil && latestObject.GetId() != "" && latestObject.GetTimestamp() >= latest { + vec = latestObject } + } + c.correctTimestamp(ctx, id, vec, found) + } else if len(skipped) > 0 { + log.Debugf("timestamp correction for index id %s skipped, replica %s, skipped agents: %v", id, addr, skipped) + } + diff := c.indexReplica - (len(found) + 1) + if diff > 0 { // correct index replica shortage + return c.correctShortage(egctx, id, addr, debugMsg, vec, found, diff) + } else if diff < 0 { // correct index replica oversupply + return c.correctOversupply(egctx, id, addr, debugMsg, found, diff) + } + return nil + })) + } + } + } + }); err != nil { + // This only happens when ErrGRPCClientConnNotFound is returned. + // In other cases, OrderedRange continues processing, so error is used to keep track of the error status of correction. + errs = append(errs, err) + } + if len(errs) != 0 { + return errors.Join(errs...) + } - // skip if the vector is inserted after correction start - if ots.GetTimestamp() > start.UnixNano() { - log.Debugf("timestamp of vector(id: %s, timestamp: %v) is newer than correction start time(%v). 
skipping...", - ots.GetId(), - ots.GetTimestamp(), - start.UnixNano(), - ) - return nil - } - mu.Lock() - found[addr] = ots - if latest < ots.GetTimestamp() { - latest = ots.GetTimestamp() - if latest > vec.GetTimestamp() { - latestAgent = addr - } - } - mu.Unlock() - return nil + return nil +} + +func (c *correct) PreStop(_ context.Context) error { + log.Info("removing persistent cache files...") + return c.checkedList.Close(true) +} + +func (c *correct) NumberOfCheckedIndex() uint64 { + return c.checkedIndexCount.Load() +} + +func (c *correct) NumberOfCorrectedOldIndex() uint64 { + return c.correctedOldIndexCount.Load() +} + +func (c *correct) NumberOfCorrectedReplication() uint64 { + return c.correctedReplicationCount.Load() +} + +func (c *correct) loadReplicaInfo( + ctx context.Context, + originAddr, id string, + replicas []string, + counts map[string]*payload.Info_Index_Count, + ts int64, + start time.Time, +) ( + found map[string]*payload.Object_Timestamp, + skipped []string, + latest int64, + latestAgent string, + err error, +) { + var mu sync.Mutex + latestAgent = originAddr + skipped = make([]string, 0, len(replicas)) + found = make(map[string]*payload.Object_Timestamp, c.indexReplica-1) + err = c.discoverer.GetClient().OrderedRangeConcurrent(ctx, replicas, len(replicas), + func(ctx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) error { + if originAddr == addr { + return nil + } + count, ok := counts[addr] // counts is read-only we don't need to lock. + if ok && count != nil && count.GetStored() == 0 && count.GetUncommitted() == 0 { + mu.Lock() + skipped = append(skipped, addr) + mu.Unlock() + return nil + } + + ots, err := vc.NewValdClient(conn).GetTimestamp(ctx, &payload.Object_TimestampRequest{ + Id: &payload.Object_ID{ + Id: id, + }, + }) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call GetTimestamp to agent: %s, id: %s returned not a gRPC status error: %v", addr, id, err) + return err + } else if st.Code() == codes.NotFound { + // when replica of agent > index replica, this happens + return nil + } else if st.Code() == codes.Canceled { + return nil + } else { + log.Errorf("failed to GetTimestamp with unexpected error. agent: %s, id: %s, code: %v, message: %s", addr, id, st.Code(), st.Message()) + return err + } + } + + if ots == nil { + // not found + return nil + } + + // skip if the vector is inserted after correction start + if ots.GetTimestamp() > start.UnixNano() { + log.Debugf("timestamp of vector(id: %s, timestamp: %v) is newer than correction start time(%v). skipping...", + ots.GetId(), + ots.GetTimestamp(), + start.UnixNano(), + ) + return nil + } + mu.Lock() + found[addr] = ots + if latest < ots.GetTimestamp() { + latest = ots.GetTimestamp() + if latest > ts { + latestAgent = addr + } + } + mu.Unlock() + return nil + }, + ) + return +} + +func (c *correct) getLatestObject( + ctx context.Context, id, addr, latestAgent string, latest int64, +) (latestObject *payload.Object_Vector) { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.ObjectRPCServiceName+"/"+vald.GetObjectRPCName), latestAgent, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + obj, err := vc.NewValdClient(conn).GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: id, + }, + }, copts...) 
+ if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + if obj == nil { + // not found + return nil, nil + } + if obj.GetTimestamp() >= latest && obj.GetId() != "" && obj.GetVector() != nil { + latestObject = obj + } + return obj, nil + }) + if err != nil { + log.Errorf("failed to load latest object id: %s, agent: %s, timestamp: %d, error: %v", id, addr, latest, err) + } + if latestObject != nil && latestObject.GetTimestamp() < latest { + latestObject.Timestamp = latest + } + return latestObject +} + +func (c *correct) correctTimestamp( + ctx context.Context, + id string, + latestObject *payload.Object_Vector, + found map[string]*payload.Object_Timestamp, +) { + tss := time.Unix(0, latestObject.GetTimestamp()).Format(time.RFC3339Nano) // timestamp string + for addr, ots := range found { // correct timestamp inconsistency + if latestObject.GetTimestamp() > ots.GetTimestamp() { + log.Infof("timestamp inconsistency detected with vector(id: %s, timestamp: %s). updating with the latest vector(id: %s, timestamp: %s)", + ots.GetId(), + time.Unix(0, ots.GetTimestamp()).Format(time.RFC3339Nano), + latestObject.GetId(), + tss, + ) + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateRPCName), addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + client := vc.NewValdClient(conn) + // TODO: use UpdateTimestamp when it's implemented because here we just want to update only the timestamp but not the vector + _, err := client.Update(ctx, &payload.Update_Request{ + Vector: latestObject, + // TODO: this should be deleted after Config.Timestamp deprecation + Config: &payload.Update_Config{ + // TODO: Decrementing because it's gonna be incremented before being pushed + // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation + // so we should consider refactoring vqueue. + Timestamp: latestObject.GetTimestamp() - 1, + }, + }, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + _, err = client.Insert(ctx, &payload.Insert_Request{ + Vector: latestObject, + // TODO: this should be deleted after Config.Timestamp deprecation + Config: &payload.Insert_Config{ + // TODO: Decrementing because it's gonna be incremented before being pushed + // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation + // so we should consider refactoring vqueue. + Timestamp: latestObject.GetTimestamp(), }, - ); err != nil { - return err - } - latestObject := vec - - // current object timestamp is not latest get latest object from other agent index replica - if vec.GetTimestamp() < latest && latestAgent != addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.ObjectRPCServiceName+"/"+vald.GetObjectRPCName), latestAgent, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - obj, err := vald.NewObjectClient(conn).GetObject(ctx, &payload.Object_VectorRequest{ + }, copts...) 
+ if err != nil { + if st, ok = status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.AlreadyExists { + var obj *payload.Object_Vector + obj, err = client.GetObject(ctx, &payload.Object_VectorRequest{ Id: &payload.Object_ID{ Id: id, }, }, copts...) if err != nil { - if st, ok := status.FromError(err); !ok { + if st, ok = status.FromError(err); !ok || st == nil { log.Errorf("gRPC call returned not a gRPC status error: %v", err) return nil, err } else if st.Code() == codes.NotFound { @@ -428,285 +511,198 @@ func (c *correct) Start(ctx context.Context) (err error) { } return nil, err } - if obj.GetTimestamp() >= latest && obj.GetId() != "" && obj.GetVector() != nil { - latestObject = obj - } - return obj, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to load latest object id: %s, agent: %s, timestamp: %d, error: %w", id, addr, latest, err)) - } - } - if latestObject.Timestamp < latest { - latestObject.Timestamp = latest - } - tss := time.Unix(0, latestObject.GetTimestamp()).Format(time.RFC3339Nano) // timestamp string - for addr, ots := range found { // correct timestamp inconsistency - if latestObject.GetTimestamp() > ots.GetTimestamp() { - log.Infof("timestamp inconsistency detected with vector(id: %s, timestamp: %s). updating with the latest vector(id: %s, timestamp: %s)", - ots.GetId(), - time.Unix(0, ots.GetTimestamp()).Format(time.RFC3339Nano), - latestObject.GetId(), - tss, - ) - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateRPCName), addr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - // TODO: use UpdateTimestamp when it's implemented because here we just want to update only the timestamp but not the vector - _, err := client.Update(ctx, &payload.Update_Request{ + if obj != nil && obj.GetTimestamp() < latestObject.GetTimestamp() { + _, err = client.Update(ctx, &payload.Update_Request{ Vector: latestObject, // TODO: this should be deleted after Config.Timestamp deprecation Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed + // TODO: Decrementing because it's gonna be incremented before being pushed // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation // so we should consider refactoring vqueue. Timestamp: latestObject.GetTimestamp() - 1, }, }, copts...) if err != nil { - if st, ok := status.FromError(err); !ok { + if st, ok = status.FromError(err); !ok || st == nil { log.Errorf("gRPC call returned not a gRPC status error: %v", err) return nil, err } else if st.Code() == codes.NotFound { - _, err = client.Insert(ctx, &payload.Insert_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp(), - }, - }, copts...) 
- if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < latestObject.GetTimestamp() { - _, err = client.Update(ctx, &payload.Update_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - } - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) return nil, nil } else if st.Code() == codes.Canceled { return nil, nil } return nil, err } - log.Infof("vector successfully updated. address: %s, uuid: %s, timestamp: %s", addr, latestObject.GetId(), tss) c.correctedOldIndexCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to fix timestamp to %s for id %s agent %s error: %w", tss, id, addr, err)) } + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil } + return nil, err } - currentNumberOfIndexReplica := len(found) + 1 - diff := c.indexReplica - currentNumberOfIndexReplica - addrs := c.discoverer.GetAddrs(egctx) - if diff > 0 { // correct index replica shortage - log.Infof("replica shortage(diff=%d) of vector id: %s detected for %s. inserting to other agents = %v", diff, id, debugMsg, addrs) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToInsert - } - req := &payload.Insert_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - Timestamp: latestObject.GetTimestamp(), - }, - } - for _, daddr := range addrs { - if diff > 0 && daddr != addr { - _, ok := found[daddr] - if !ok { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - _, err := client.Insert(ctx, req, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) 
- if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < latestObject.GetTimestamp() { - _, err = client.Update(ctx, &payload.Update_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to insert object to agent(%s): %w", daddr, err)) - } - } - } - } - } else if diff < 0 { // correct index replica oversupply - log.Infof("replica oversupply of vector %s. deleting...", id) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToRemove - } - req := &payload.Remove_Request{ + c.correctedOldIndexCount.Add(1) + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + log.Infof("vector successfully updated. address: %s, uuid: %s, timestamp: %s", addr, latestObject.GetId(), tss) + c.correctedOldIndexCount.Add(1) + return nil, nil + }) + if err != nil { + log.Error(fmt.Errorf("failed to fix timestamp to %s for id %s agent %s error: %w", tss, id, addr, err)) + } + } + } +} + +func (c *correct) correctOversupply( + ctx context.Context, + id, selfAddr, debugMsg string, + found map[string]*payload.Object_Timestamp, + diff int, +) (err error) { + addrs := c.discoverer.GetAddrs(ctx) + log.Infof("replica oversupply(configured: %d, stored: %d, diff: %d) of vector id: %s detected for %s. deleting from agents = %v", c.indexReplica, len(found)+1, diff, id, debugMsg, found) + if len(addrs) == 0 { + return errors.ErrNoAvailableAgentToRemove + } + req := &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: id, + }, + } + for _, daddr := range addrs { + if diff < 0 { + _, ok := found[daddr] + if ok || daddr == selfAddr { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.RemoveRPCServiceName+"/"+vald.RemoveRPCName), daddr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + _, err := vc.NewValdClient(conn).Remove(ctx, req, copts...) 
+ if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + diff++ + c.correctedReplicationCount.Add(1) + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + diff++ + c.correctedReplicationCount.Add(1) + return nil, nil + }) + if err != nil { + log.Error(fmt.Errorf("failed to delete object from agent(%s): %w", daddr, err)) + } + } + } + } + return nil +} + +func (c *correct) correctShortage( + ctx context.Context, + id, selfAddr, debugMsg string, + latestObject *payload.Object_Vector, + found map[string]*payload.Object_Timestamp, + diff int, +) (err error) { + addrs := c.discoverer.GetAddrs(ctx) + log.Infof("replica shortage(configured: %d, stored: %d, diff: %d) of vector id: %s detected for %s. inserting to other agents = %v", c.indexReplica, len(found)+1, diff, id, debugMsg, addrs) + if len(addrs) == 0 { + return errors.ErrNoAvailableAgentToInsert + } + req := &payload.Insert_Request{ + Vector: latestObject, + // TODO: this should be deleted after Config.Timestamp deprecation + Config: &payload.Insert_Config{ + Timestamp: latestObject.GetTimestamp(), + }, + } + for _, daddr := range addrs { + if diff > 0 && daddr != selfAddr { + _, ok := found[daddr] + if !ok { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + client := vc.NewValdClient(conn) + _, err := client.Insert(ctx, req, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.AlreadyExists { + var obj *payload.Object_Vector + obj, err = client.GetObject(ctx, &payload.Object_VectorRequest{ Id: &payload.Object_ID{ Id: id, }, + }, copts...) + if err != nil { + if st, ok = status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err } - for _, daddr := range addrs { - if diff < 0 { - _, ok := found[daddr] - if ok || daddr == addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.RemoveRPCServiceName+"/"+vald.RemoveRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - _, err := vald.NewRemoveClient(conn).Remove(ctx, req, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - diff++ - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff++ - c.correctedReplicationCount.Add(1) + if obj != nil { + if obj.GetTimestamp() < latestObject.GetTimestamp() { + _, err = client.Update(ctx, &payload.Update_Request{ + Vector: latestObject, + // TODO: this should be deleted after Config.Timestamp deprecation + Config: &payload.Update_Config{ + // TODO: Decrementing because it's gonna be incremented before being pushed + // to vqueue in the agent. 
This is a not ideal workaround for the current vqueue implementation + // so we should consider refactoring vqueue. + Timestamp: latestObject.GetTimestamp() - 1, + }, + }, copts...) + if err != nil { + if st, ok = status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to delete object from agent(%s): %w", daddr, err)) } + return nil, err } } + diff-- + c.correctedReplicationCount.Add(1) } + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil } - return nil - })) + return nil, err + } + diff-- + c.correctedReplicationCount.Add(1) + return nil, nil + }) + if err != nil { + log.Error(fmt.Errorf("failed to insert object to agent(%s): %w", daddr, err)) } } } - }); err != nil { - // This only happens when ErrGRPCClientConnNotFound is returned. - // In other cases, OrderedRange continues processing, so error is used to keep track of the error status of correction. - errs = append(errs, err) - } - if len(errs) != 0 { - return errors.Join(errs...) } - return nil } - -func (c *correct) PreStop(_ context.Context) error { - log.Info("removing persistent cache files...") - return c.checkedList.Close(true) -} - -func (c *correct) NumberOfCheckedIndex() uint64 { - return c.checkedIndexCount.Load() -} - -func (c *correct) NumberOfCorrectedOldIndex() uint64 { - return c.correctedOldIndexCount.Load() -} - -func (c *correct) NumberOfCorrectedReplication() uint64 { - return c.correctedReplicationCount.Load() -} diff --git a/pkg/index/job/correction/usecase/corrector.go b/pkg/index/job/correction/usecase/corrector.go index 976d3ad9611..5bf3b6b267d 100644 --- a/pkg/index/job/correction/usecase/corrector.go +++ b/pkg/index/job/correction/usecase/corrector.go @@ -185,10 +185,10 @@ func (r *run) Start(ctx context.Context) (<-chan error, error) { } })) - // main groutine to run the job + // main goroutine to run the job r.eg.Go(safety.RecoverFunc(func() (err error) { defer func() { - log.Info("fiding my pid to kill myself") + log.Info("finding my pid to kill myself") p, err := os.FindProcess(os.Getpid()) if err != nil { // using Fatal to avoid this process to be zombie diff --git a/pkg/index/job/readreplica/rotate/service/rotator.go b/pkg/index/job/readreplica/rotate/service/rotator.go index 9975e31b1a3..ef24e67889c 100644 --- a/pkg/index/job/readreplica/rotate/service/rotator.go +++ b/pkg/index/job/readreplica/rotate/service/rotator.go @@ -198,7 +198,7 @@ func (s *subProcess) createSnapshot( oldSnap = cur.DeepCopy() newNameBase := getNewBaseName(cur.GetObjectMeta().GetName()) if newNameBase == "" { - return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replicaid", cur.GetObjectMeta().GetName()) + return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replica id", cur.GetObjectMeta().GetName()) } newSnap = &k8s.VolumeSnapshot{ ObjectMeta: k8s.ObjectMeta{ @@ -244,7 +244,7 @@ func (s *subProcess) createPVC( oldPvc = cur.DeepCopy() newNameBase := getNewBaseName(cur.GetObjectMeta().GetName()) if newNameBase == "" { - return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replicaid", cur.GetObjectMeta().GetName()) + return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replica id", cur.GetObjectMeta().GetName()) } // remove timestamp from old pvc name diff --git a/pkg/index/operator/service/operator.go 
b/pkg/index/operator/service/operator.go index dc9f4769589..cfc086a58f9 100644 --- a/pkg/index/operator/service/operator.go +++ b/pkg/index/operator/service/operator.go @@ -309,11 +309,11 @@ func (o *operator) ensureJobConcurrency( } for _, job := range jobList.Items { - annotaions := job.Spec.Template.Annotations - if annotaions == nil { + annotations := job.Spec.Template.Annotations + if annotations == nil { continue } - id, ok := annotaions[o.targetReadReplicaIDAnnotationsKey] + id, ok := annotations[o.targetReadReplicaIDAnnotationsKey] if !ok { continue } diff --git a/pkg/manager/index/service/indexer.go b/pkg/manager/index/service/indexer.go index 42e90b1c9ba..88400fe75fe 100644 --- a/pkg/manager/index/service/indexer.go +++ b/pkg/manager/index/service/indexer.go @@ -24,10 +24,10 @@ import ( "sync/atomic" "time" - agent "github.com/vdaas/vald/apis/grpc/v1/agent/core" "github.com/vdaas/vald/apis/grpc/v1/payload" - vald "github.com/vdaas/vald/apis/grpc/v1/vald" + agent "github.com/vdaas/vald/internal/client/v1/client/agent/core" "github.com/vdaas/vald/internal/client/v1/client/discoverer" + vald "github.com/vdaas/vald/internal/client/v1/client/vald" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" diff --git a/pkg/tools/benchmark/job/config/config.go b/pkg/tools/benchmark/job/config/config.go index a81c68d1c93..03c3adc485f 100644 --- a/pkg/tools/benchmark/job/config/config.go +++ b/pkg/tools/benchmark/job/config/config.go @@ -112,7 +112,7 @@ func NewConfig(ctx context.Context, path string) (cfg *Config, err error) { if jobResource.Spec.ServerConfig != nil { overrideCfg.Server = (*jobResource.Spec.ServerConfig).Bind() } - // jobResource.Spec has another field comparering Config.Job, so json.Marshal and Unmarshal are used for embedding field value of Config.Job from jobResource.Spec + // jobResource.Spec has another field comparing Config.Job, so json.Marshal and Unmarshal are used for embedding field value of Config.Job from jobResource.Spec var overrideJobCfg config.BenchmarkJob b, err := json.Marshal(*jobResource.Spec.DeepCopy()) if err == nil { diff --git a/pkg/tools/benchmark/operator/service/operator.go b/pkg/tools/benchmark/operator/service/operator.go index 187673d5270..1d80e6838d8 100644 --- a/pkg/tools/benchmark/operator/service/operator.go +++ b/pkg/tools/benchmark/operator/service/operator.go @@ -193,8 +193,8 @@ func (o *operator) jobReconcile(ctx context.Context, jobList map[string][]k8s.Jo } // benchmarkJobStatus is used for update benchmark job resource status benchmarkJobStatus := make(map[string]v1.BenchmarkJobStatus) - // jobNames is used for check whether cjobs has delted job. - // If cjobs has the delted job, it will be remove the end of jobReconcile function. + // jobNames is used for check whether cjobs has deleted job. + // If cjobs has the deleted job, it will be remove the end of jobReconcile function. 
jobNames := map[string]struct{}{} for _, jobs := range jobList { cnt := len(jobs) diff --git a/pkg/tools/benchmark/operator/service/operator_test.go b/pkg/tools/benchmark/operator/service/operator_test.go index 1e2e0abba51..70129badaa9 100644 --- a/pkg/tools/benchmark/operator/service/operator_test.go +++ b/pkg/tools/benchmark/operator/service/operator_test.go @@ -2911,7 +2911,7 @@ func Test_operator_checkAtomics(t *testing.T) { tests := []test{ func() test { return test{ - name: "return nil with no mismatch atmoics", + name: "return nil with no mismatch atomics", fields: fields{ scenarios: func() *atomic.Pointer[map[string]*scenario] { ap := atomic.Pointer[map[string]*scenario]{} diff --git a/rust/Cargo.lock b/rust/Cargo.lock index c6b8a0217d3..fd84b823b04 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -181,9 +181,9 @@ checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" -version = "1.1.8" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504bdec147f2cc13c8b57ed9401fd8a147cc66b67ad5cb241394244f2c947549" +checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" [[package]] name = "cfg-if" @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cxx" @@ -568,9 +568,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -678,9 +678,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi", "libc", @@ -1105,18 +1105,18 @@ checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.207" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.207" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" dependencies = [ "proc-macro2", "quote", @@ -1125,9 +1125,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.121" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ "itoa", "memchr", @@ -1210,9 +1210,9 @@ checksum = 
"b7401a30af6cb5818bb64852270bb722533397edcfc7344954a38f420819ece2" [[package]] name = "syn" -version = "2.0.72" +version = "2.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" dependencies = [ "proc-macro2", "quote", @@ -1543,19 +1543,20 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", @@ -1568,9 +1569,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -1580,9 +1581,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1590,9 +1591,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", @@ -1603,15 +1604,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/rust/bin/agent/src/handler/update.rs b/rust/bin/agent/src/handler/update.rs index c738a3dfd50..f8981b4f242 100644 --- a/rust/bin/agent/src/handler/update.rs +++ b/rust/bin/agent/src/handler/update.rs @@ -45,4 +45,12 @@ impl update_server::Update for super::Agent { ) -> std::result::Result, tonic::Status> { todo!() } + + #[doc = " A method to update timestamp indexed vectors in a single request.\n"] + async fn update_timestamp( + &self, + request: tonic::Request, + ) -> 
std::result::Result, tonic::Status> { + todo!() + } } diff --git a/rust/libs/proto/src/vald.v1.tonic.rs b/rust/libs/proto/src/vald.v1.tonic.rs index 1ff23e3b464..654bb5701be 100644 --- a/rust/libs/proto/src/vald.v1.tonic.rs +++ b/rust/libs/proto/src/vald.v1.tonic.rs @@ -5450,6 +5450,35 @@ pub mod update_client { .insert(GrpcMethod::new("vald.v1.Update", "MultiUpdate")); self.inner.unary(req, path, codec).await } + /** A method to update timestamp an indexed vector. +*/ + pub async fn update_timestamp( + &mut self, + request: impl tonic::IntoRequest< + super::super::super::payload::v1::object::Timestamp, + >, + ) -> std::result::Result< + tonic::Response<super::super::super::payload::v1::object::Location>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/vald.v1.Update/UpdateTimestamp", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("vald.v1.Update", "UpdateTimestamp")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -5497,6 +5526,15 @@ pub mod update_server { tonic::Response, tonic::Status, >; + /** A method to update timestamp an indexed vector. +*/ + async fn update_timestamp( + &self, + request: tonic::Request<super::super::super::payload::v1::object::Timestamp>, + ) -> std::result::Result< + tonic::Response<super::super::super::payload::v1::object::Location>, + tonic::Status, + >; } #[derive(Debug)] pub struct UpdateServer { @@ -5727,6 +5765,55 @@ pub mod update_server { }; Box::pin(fut) } + "/vald.v1.Update/UpdateTimestamp" => { + #[allow(non_camel_case_types)] + struct UpdateTimestampSvc<T: Update>(pub Arc<T>); + impl< + T: Update, + > tonic::server::UnaryService< + super::super::super::payload::v1::object::Timestamp, + > for UpdateTimestampSvc<T> { + type Response = super::super::super::payload::v1::object::Location; + type Future = BoxFuture< + tonic::Response<Self::Response>, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::super::payload::v1::object::Timestamp, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + <T as Update>::update_timestamp(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateTimestampSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( diff --git a/tests/e2e/crud/crud_test.go b/tests/e2e/crud/crud_test.go index a5ba1273bac..71969d38f9f 100644 --- a/tests/e2e/crud/crud_test.go +++ b/tests/e2e/crud/crud_test.go @@ -415,7 +415,7 @@ func TestE2EStandardCRUD(t *testing.T) { err = op.Flush(t, ctx) if err != nil { - // TODO: Remove code check afeter Flush API is available for agent-faiss and mirror-gateway + // TODO: Remove code check after Flush API is available for agent-faiss and mirror-gateway st, _, _ := status.ParseError(err, 
codes.Unknown, "") if st.Code() != codes.Unimplemented { t.Fatalf("an error occurred: %s", err) @@ -865,7 +865,7 @@ func TestE2EReadReplica(t *testing.T) { t.Log("waiting for read replica rotator jobs to complete...") if err := kubectl.WaitResources(ctx, t, "job", "app=vald-readreplica-rotate", "complete", "60s"); err != nil { t.Log("wait failed. printing yaml of vald-readreplica-rotate") - kubectl.KubectlCmd(ctx, t, "get", "pod", "-l", "app=vald-readreplica-rotate", "-oyaml") + kubectl.KubectlCmd(ctx, t, "get", "pod", "-l", "app=vald-readreplica-rotate", "-o", "yaml") t.Log("wait failed. printing log of vald-index-operator") kubectl.DebugLog(ctx, t, "app=vald-index-operator") t.Log("wait failed. printing log of vald-readreplica-rotate") diff --git a/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go b/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go index 114ac6daa01..8a22351a780 100644 --- a/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go +++ b/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go @@ -147,7 +147,7 @@ func Test_ngt_parallel_delete_and_insert(t *testing.T) { wg.Wait() if n.Len() != maxIDNum { - t.Errorf("inerted id num = %d, want = %d", n.Len(), maxIDNum) + t.Errorf("inserted id num = %d, want = %d", n.Len(), maxIDNum) } for i := int64(0); i < maxIDNum; i++ { @@ -230,7 +230,7 @@ func Test_ngt_parallel_insert_and_delete(t *testing.T) { wg.Wait() if want, got := n.Len(), uint64(0); want != got { - t.Errorf("inerted id num = %d, want = %d", got, want) + t.Errorf("inserted id num = %d, want = %d", got, want) } for i := int64(0); i < maxIDNum; i++ { diff --git a/versions/PROMETHEUS_STACK_VERSION b/versions/PROMETHEUS_STACK_VERSION index 14e34602251..c40b5fb3e9e 100644 --- a/versions/PROMETHEUS_STACK_VERSION +++ b/versions/PROMETHEUS_STACK_VERSION @@ -1 +1 @@ -61.7.1 +61.8.0 diff --git a/versions/VALDCLI_VERSION b/versions/VALDCLI_VERSION deleted file mode 100644 index b84efa430e0..00000000000 --- a/versions/VALDCLI_VERSION +++ /dev/null @@ -1 +0,0 @@ -v1.7.12